add apr_pool_mutex_set() to our apr to fix thread-safety issue

git-svn-id: http://svn.freeswitch.org/svn/freeswitch/trunk@12672 d0543943-73ff-0310-b7d9-9358b9ac24b2
Anthony Minessale 2009-03-20 01:24:45 +00:00
parent bf5833650c
commit ffb1cb4f69
2 changed files with 65 additions and 14 deletions

include/apr_pools.h

@@ -411,6 +411,17 @@ APR_DECLARE(int) apr_pool_is_ancestor(apr_pool_t *a, apr_pool_t *b);
  */
 APR_DECLARE(void) apr_pool_tag(apr_pool_t *pool, const char *tag);
 
+#if APR_HAS_THREADS
+/**
+ * Add a mutex to a pool to make it suitable for use from multiple threads.
+ * @param pool The pool to add the mutex to
+ * @param mutex The mutex
+ * @remark The mutex does not protect the destroy operation, just the low-level allocs.
+ */
+APR_DECLARE(void) apr_pool_mutex_set(apr_pool_t *pool,
+                                     apr_thread_mutex_t *mutex);
+#endif
 
 /*
  * User data management
@@ -435,6 +446,7 @@ APR_DECLARE(void) apr_pool_tag(apr_pool_t *pool, const char *tag);
  * key names is a typical way to help ensure this uniqueness.
  *
  */
 APR_DECLARE(apr_status_t) apr_pool_userdata_set(
     const void *data,
     const char *key,
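
For context, a minimal caller-side sketch of the new API (not part of this commit; the pool names are illustrative). The mutex is created by the caller and attached before the pool is shared across threads; per the @remark above, destruction is still the caller's problem:

    #include <apr_pools.h>
    #include <apr_thread_mutex.h>

    apr_pool_t *parent, *pool;
    apr_thread_mutex_t *mutex;

    apr_pool_create(&parent, NULL);
    apr_pool_create(&pool, parent);
    /* Allocate the mutex from the parent, not from the pool it guards:
     * apr_pool_clear(pool) runs cleanups while holding the lock, and
     * destroying the mutex it is currently holding would be fatal. */
    apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, parent);
    apr_pool_mutex_set(pool, mutex);

Keeping the mutex's storage outside the guarded pool is the one design constraint the header comment implies but does not spell out.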

memory/unix/apr_pools.c

@@ -449,7 +449,9 @@ struct apr_pool_t {
     apr_abortfunc_t abort_fn;
     apr_hash_t *user_data;
     const char *tag;
-
+#if APR_HAS_THREADS
+    apr_thread_mutex_t *user_mutex;
+#endif
 #if !APR_POOL_DEBUG
     apr_memnode_t *active;
     apr_memnode_t *self; /* The node containing the pool itself */
@@ -471,6 +473,7 @@ struct apr_pool_t {
 #endif /* APR_POOL_DEBUG */
 
 #ifdef NETWARE
     apr_os_proc_t owner_proc;
 #endif /* defined(NETWARE) */
 };
@@ -594,9 +597,11 @@ APR_DECLARE(void) apr_pool_terminate(void)
 APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
 {
     apr_memnode_t *active, *node;
-    void *mem;
+    void *mem = NULL;
     apr_size_t free_index;
 
+#if APR_HAS_THREADS
+    if (pool->user_mutex) apr_thread_mutex_lock(pool->user_mutex);
+#endif
     size = APR_ALIGN_DEFAULT(size);
     active = pool->active;
@@ -605,7 +610,7 @@ APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
         mem = active->first_avail;
         active->first_avail += size;
-        return mem;
+        goto end;
     }
 
     node = active->next;
@@ -617,7 +622,8 @@ APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
             if (pool->abort_fn)
                 pool->abort_fn(APR_ENOMEM);
 
-            return NULL;
+            mem = NULL;
+            goto end;
         }
     }
@@ -636,7 +642,7 @@ APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
     active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
     node = active->next;
     if (free_index >= node->free_index)
-        return mem;
+        goto end;
 
     do {
         node = node->next;
@@ -646,6 +652,10 @@ APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
     list_remove(active);
     list_insert(active, node);
 
+end:
+#if APR_HAS_THREADS
+    if (pool->user_mutex) apr_thread_mutex_unlock(pool->user_mutex);
+#endif
     return mem;
 }
@@ -678,7 +688,9 @@ APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
 APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
 {
     apr_memnode_t *active;
 
+#if APR_HAS_THREADS
+    if (pool->user_mutex) apr_thread_mutex_lock(pool->user_mutex);
+#endif
     /* Destroy the subpools. The subpools will detach themselves from
      * this pool thus this loop is safe and easy.
      */
@@ -704,14 +716,27 @@ APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
     active->first_avail = pool->self_first_avail;
 
     if (active->next == active)
-        return;
+        goto end;
 
     *active->ref = NULL;
     allocator_free(pool->allocator, active->next);
     active->next = active;
     active->ref = &active->next;
+
+end:
+#if APR_HAS_THREADS
+    if (pool->user_mutex) apr_thread_mutex_unlock(pool->user_mutex);
+#endif
 }
 
+#if APR_HAS_THREADS
+APR_DECLARE(void) apr_pool_mutex_set(apr_pool_t *pool,
+                                     apr_thread_mutex_t *mutex)
+{
+    pool->user_mutex = mutex;
+}
+#endif
 
 APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
 {
     apr_memnode_t *active;
@@ -820,7 +845,9 @@ APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
     pool->subprocesses = NULL;
     pool->user_data = NULL;
     pool->tag = NULL;
+#if APR_HAS_THREADS
+    pool->user_mutex = NULL;
+#endif
 
 #ifdef NETWARE
     pool->owner_proc = (apr_os_proc_t)getnlmhandle();
 #endif /* defined(NETWARE) */
@@ -963,6 +990,10 @@ APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
     apr_memnode_t *active, *node;
     apr_size_t free_index;
 
+#if APR_HAS_THREADS
+    if (pool->user_mutex) apr_thread_mutex_lock(pool->user_mutex);
+#endif
+
     ps.node = active = pool->active;
     ps.pool = pool;
     ps.vbuff.curpos = ps.node->first_avail;
@@ -981,7 +1012,8 @@ APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
                 pool->abort_fn(APR_ENOMEM);
             }
 
-            return NULL;
+            strp = NULL;
+            goto end;
         }
     }
@@ -989,7 +1021,8 @@ APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
         if (pool->abort_fn)
             pool->abort_fn(APR_ENOMEM);
 
-        return NULL;
+        strp = NULL;
+        goto end;
     }
 
     strp = ps.vbuff.curpos;
@@ -1006,8 +1039,8 @@ APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
     /*
      * Link the node in if it's a new one
      */
-    if (!ps.got_a_new_node)
-        return strp;
+    if (!ps.got_a_new_node)
+        goto end;
 
     active = pool->active;
     node = ps.node;
@@ -1025,7 +1058,7 @@ APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
     node = active->next;
     if (free_index >= node->free_index)
-        return strp;
+        goto end;
 
     do {
         node = node->next;
@@ -1035,6 +1068,12 @@ APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
     list_remove(active);
     list_insert(active, node);
 
+end:
+#if APR_HAS_THREADS
+    if (pool->user_mutex) apr_thread_mutex_unlock(pool->user_mutex);
+#endif
+
     return strp;
 }
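
And an end-to-end smoke test of the patched behavior (hypothetical, not from the commit; thread and iteration counts are arbitrary): two threads allocate concurrently from one guarded pool, which an unpatched apr would corrupt via races on pool->active.

#include <stdlib.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_thread_mutex.h>
#include <apr_thread_proc.h>

static void * APR_THREAD_FUNC worker(apr_thread_t *thd, void *data)
{
    apr_pool_t *pool = data;
    int i;

    /* Each apr_palloc() now takes pool->user_mutex around the
     * active-node bookkeeping, so concurrent callers are safe. */
    for (i = 0; i < 100000; i++) {
        if (apr_palloc(pool, 64) == NULL)
            abort();
    }
    apr_thread_exit(thd, APR_SUCCESS);
    return NULL;
}

int main(void)
{
    apr_pool_t *parent, *pool;
    apr_thread_mutex_t *mutex;
    apr_thread_t *t1, *t2;
    apr_status_t rv;

    apr_initialize();
    apr_pool_create(&parent, NULL);
    apr_pool_create(&pool, parent);
    /* Mutex storage lives in parent, not in the pool it guards. */
    apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, parent);
    apr_pool_mutex_set(pool, mutex);

    apr_thread_create(&t1, NULL, worker, pool, parent);
    apr_thread_create(&t2, NULL, worker, pool, parent);
    apr_thread_join(&rv, t1);
    apr_thread_join(&rv, t2);

    /* Destruction is not covered by the mutex (see the header @remark),
     * so join the workers before tearing the pools down. */
    apr_pool_destroy(parent);
    apr_terminate();
    return 0;
}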