Index: memory/unix/apr_pools.c
===================================================================
RCS file: /home/cvs/apr/memory/unix/apr_pools.c,v
retrieving revision 1.150
diff -u -r1.150 apr_pools.c
--- memory/unix/apr_pools.c	6 Feb 2002 21:01:36 -0000	1.150
+++ memory/unix/apr_pools.c	7 Feb 2002 01:36:45 -0000
@@ -122,7 +122,9 @@
 
 struct node_t {
     node_t      *next;
+    node_t     **ref;
     apr_uint32_t index;
+    apr_uint32_t free_index;
     char        *first_avail;
     char        *endp;
 };
@@ -205,7 +207,7 @@
 static apr_pool_t  *global_pool = NULL;
 
 #if !APR_POOL_DEBUG
-static allocator_t  global_allocator = { 
+static allocator_t  global_allocator = {
     0,          /* max_index */
 #if APR_HAS_THREADS
     NULL,       /* mutex */
@@ -239,15 +241,15 @@
 
     if (apr_pools_initialized++)
         return APR_SUCCESS;
-    
-    memset(&global_allocator, 0, sizeof(global_allocator));
+
+    memset(&global_allocator, 0, SIZEOF_ALLOCATOR_T);
 
     if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL, APR_POOL_FDEFAULT)) != APR_SUCCESS) {
         return rv;
     }
-    
-#if APR_HAS_THREADS    
-    if ((rv = apr_thread_mutex_create(&global_allocator.mutex, 
+
+#if APR_HAS_THREADS
+    if ((rv = apr_thread_mutex_create(&global_allocator.mutex,
                   APR_THREAD_MUTEX_DEFAULT, global_pool)) != APR_SUCCESS) {
         return rv;
     }
@@ -255,7 +257,7 @@
 
     global_allocator.owner = global_pool;
     apr_pools_initialized = 1;
-    
+
     return APR_SUCCESS;
 }
 
@@ -265,11 +267,11 @@
         return;
 
     apr_pools_initialized = 0;
-    
+
     apr_pool_destroy(global_pool); /* This will also destroy the mutex */
     global_pool = NULL;
 
-    memset(&global_allocator, 0, sizeof(global_allocator));
+    memset(&global_allocator, 0, SIZEOF_ALLOCATOR_T);
 }
 
 #ifdef NETWARE
@@ -298,7 +300,7 @@
 static APR_INLINE node_t *node_malloc(allocator_t *allocator, apr_size_t size)
 {
     node_t *node, **ref;
-    apr_uint32_t i, index, max_index; 
+    apr_uint32_t i, index, max_index;
 
     /* Round up the block size to the next boundary, but always
      * allocate at least a certain size (MIN_ALLOC).
@@ -320,7 +322,7 @@
         if (allocator->mutex)
             apr_thread_mutex_lock(allocator->mutex);
 #endif /* APR_HAS_THREADS */
-        
+
         /* Walk the free list to see if there are
          * any nodes on it of the requested size
          *
@@ -401,13 +403,13 @@
 
             return node;
         }
-        
+
 #if APR_HAS_THREADS
         if (allocator->mutex)
             apr_thread_mutex_unlock(allocator->mutex);
 #endif /* APR_HAS_THREADS */
     }
-    
+
     /* If we haven't got a suitable node, malloc a new one
      * and initialize it.
      */
@@ -473,6 +475,7 @@
     node_t *active, *node;
     void *mem;
     char *endp;
+    apr_uint32_t free_index;
 
     size = APR_ALIGN_DEFAULT(size);
     active = pool->active;
@@ -482,61 +485,73 @@
     if (endp < active->endp) {
         mem = active->first_avail;
         active->first_avail = endp;
-        
+
         return mem;
     }
 
-    if ((node = node_malloc(pool->allocator, size)) == NULL) {
-        if (pool->abort_fn)
-            pool->abort_fn(APR_ENOMEM);
+    node = active->next;
+    endp = node->first_avail + size;
+    if (endp < node->endp) {
+        *node->ref = node->next;
+        node->next->ref = node->ref;
+    }
+    else {
+        if ((node = node_malloc(pool->allocator, size)) == NULL) {
+            if (pool->abort_fn)
+                pool->abort_fn(APR_ENOMEM);
 
-        return NULL;
+            return NULL;
+        }
+
+        endp = node->first_avail + size;
     }
 
-    active->next = pool->active = node; 
+    node->free_index = 0;
 
     mem = node->first_avail;
-    node->first_avail += size;
+    node->first_avail = endp;
+
+    node->ref = active->ref;
+    *node->ref = node;
+    node->next = active;
+    active->ref = &node->next;
+
+    pool->active = node;
+
+    free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
+                            BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
+
+    active->free_index = free_index;
+    node = active->next;
+    if (free_index >= node->free_index)
+        return mem;
+
+    do {
+        node = node->next;
+    }
+    while (free_index < node->free_index);
+
+    *active->ref = active->next;
+    active->next->ref = active->ref;
+
+    active->ref = node->ref;
+    *active->ref = active;
+    active->next = node;
+    node->ref = &active->next;
 
     return mem;
 }
 
 APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
 {
-    node_t *active, *node;
     void *mem;
-    char *endp;
 
     size = APR_ALIGN_DEFAULT(size);
-    active = pool->active;
-
-    /* If the active node has enough bytes left, use it. */
-    endp = active->first_avail + size;
-    if (endp < active->endp) {
-        mem = active->first_avail;
-        active->first_avail = endp;
 
+    mem = apr_palloc(pool, size);
+    if (mem != NULL)
         memset(mem, 0, size);
-        
-        return mem;
-    }
-
-    if ((node = node_malloc(pool->allocator, size)) == NULL) {
-        active->first_avail = active->endp;
-
-        if (pool->abort_fn)
-            pool->abort_fn(APR_ENOMEM);
-
-        return NULL;
-    }
-
-    active->next = pool->active = node; 
 
-    mem = node->first_avail;
-    node->first_avail += size;
- 
-    memset(mem, 0, size);
- 
     return mem;
 }
 
@@ -569,14 +584,16 @@
     /* Find the node attached to the pool structure, reset it, make
      * it the active node and free the rest of the nodes.
      */
-    active = pool->active = pool->self; 
+    active = pool->active = pool->self;
     active->first_avail = pool->self_first_avail;
-    
-    if (active->next == NULL)
+
+    if (active->next == active)
         return;
-    
+
+    *active->ref = NULL;
     node_free(pool->allocator, active->next);
-    active->next = NULL;
+    active->next = active;
+    active->ref = &active->next;
 }
 
 APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
@@ -614,12 +631,13 @@
             apr_thread_mutex_unlock(mutex);
 #endif
     }
- 
+
     /* Find the block attached to the pool structure.  Save a copy of the
      * allocator pointer, because the pool struct soon will be no more.
      */
     allocator = pool->allocator;
     active = pool->self;
+    *active->ref = NULL;
 
     /* If this pool happens to be the owner of the allocator, free 
      * everything in the allocator (that includes the pool struct
@@ -675,11 +693,14 @@
         return APR_ENOMEM;
     }
 
+    node->next = node;
+    node->ref = &node->next;
+
     if ((flags & APR_POOL_FNEW_ALLOCATOR) == APR_POOL_FNEW_ALLOCATOR) {
         new_allocator = (allocator_t *)node->first_avail;
         pool = (apr_pool_t *)((char *)new_allocator + SIZEOF_ALLOCATOR_T);
         node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;
-        
+
         memset(new_allocator, 0, SIZEOF_ALLOCATOR_T);
         new_allocator->owner = pool;
 
@@ -707,7 +728,7 @@
     else {
         pool = (apr_pool_t *)node->first_avail;
         node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;
-    
+
         pool->allocator = allocator;
         pool->active = pool->self = node;
         pool->abort_fn = abort_fn;
@@ -770,7 +791,7 @@
 struct psprintf_data {
     apr_vformatter_buff_t vbuff;
     node_t               *node;
-    allocator_t          *allocator;
+    apr_pool_t           *pool;
     apr_byte_t            got_a_new_node;
     node_t               *free;
 };
@@ -779,29 +800,70 @@
 {
     struct psprintf_data *ps = (struct psprintf_data *)vbuff;
     node_t *node, *active;
-    apr_size_t cur_len;
+    apr_size_t cur_len, size;
     char *strp;
-    allocator_t *allocator;
+    apr_pool_t *pool;
+    apr_uint32_t free_index;
 
-    allocator = ps->allocator;
-    node = ps->node;
+    pool = ps->pool;
+    active = ps->node;
     strp = ps->vbuff.curpos;
-    cur_len = strp - node->first_avail;
+    cur_len = strp - active->first_avail;
+    size = cur_len << 1;
+
+    node = active->next;
+    if (!ps->got_a_new_node && node->first_avail + size < node->endp) {
+        *node->ref = node->next;
+        node->next->ref = node->ref;
+
+        node->ref = active->ref;
+        *node->ref = node;
+        node->next = active;
+        active->ref = &node->next;
+
+        node->free_index = 0;
+
+        pool->active = node;
+
+        free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
+                                BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
+
+        active->free_index = free_index;
+        node = active->next;
+        if (free_index < node->free_index) {
+            do {
+                node = node->next;
+            }
+            while (free_index < node->free_index);
+
+            *active->ref = active->next;
+            active->next->ref = active->ref;
+
+            active->ref = node->ref;
+            *active->ref = active;
+            active->next = node;
+            node->ref = &active->next;
+        }
 
-    if ((active = node_malloc(allocator, cur_len << 1)) == NULL)
-        return -1;
+        node = pool->active;
+    }
+    else {
+        if ((node = node_malloc(pool->allocator, size)) == NULL)
+            return -1;
 
-    memcpy(active->first_avail, node->first_avail, cur_len);
+        if (ps->got_a_new_node) {
+            active->next = ps->free;
+            ps->free = active;
+        }
 
-    if (ps->got_a_new_node) {
-        node->next = ps->free;
-        ps->free = node; 
+        ps->got_a_new_node = 1;
     }
 
-    ps->node = active;
-    ps->vbuff.curpos = active->first_avail + cur_len;
-    ps->vbuff.endpos = active->endp - 1; /* Save a byte for NUL terminator */
-    ps->got_a_new_node = 1;
+    memcpy(node->first_avail, active->first_avail, cur_len);
+
+    ps->node = node;
+    ps->vbuff.curpos = node->first_avail + cur_len;
+    ps->vbuff.endpos = node->endp - 1; /* Save a byte for NUL terminator */
 
     return 0;
 }
@@ -811,10 +873,11 @@
     struct psprintf_data ps;
     char *strp;
     apr_size_t size;
-    node_t *active;
+    node_t *active, *node;
+    apr_uint32_t free_index;
 
     ps.node = active = pool->active;
-    ps.allocator = pool->allocator;
+    ps.pool = pool;
     ps.vbuff.curpos  = ps.node->first_avail;
 
     /* Save a byte for the NUL terminator */
@@ -837,15 +900,48 @@
     strp = ps.node->first_avail;
     ps.node->first_avail += size;
 
-    /* 
-     * Link the node in if it's a new one 
+    if (ps.free)
+        node_free(pool->allocator, ps.free);
+
+    /*
+     * Link the node in if it's a new one
      */
-    if (ps.got_a_new_node) {
-        active->next = pool->active = ps.node;
+    if (!ps.got_a_new_node)
+        return strp;
+
+    active = pool->active;
+    node = ps.node;
+
+    node->free_index = 0;
+
+    node->ref = active->ref;
+    *node->ref = node;
+    node->next = active;
+    active->ref = &node->next;
+
+    pool->active = node;
+
+    free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
+                            BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
+
+    active->free_index = free_index;
+    node = active->next;
+
+    if (free_index >= node->free_index)
+        return strp;
+
+    do {
+        node = node->next;
     }
+    while (free_index < node->free_index);
 
-    if (ps.free)
-        node_free(ps.allocator, ps.free);
+    *active->ref = active->next;
+    active->next->ref = active->ref;
+
+    active->ref = node->ref;
+    *active->ref = active;
+    active->next = node;
+    node->ref = &active->next;
 
     return strp;
 }
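
A note on the list handling above, since the same pointer sequence is repeated
inline in apr_palloc, psprintf_flush and apr_pvsprintf: every node now sits on
a circular list, and node->ref always holds the address of the "next" pointer
that currently points at that node. That lets a node unlink itself in O(1)
without keeping a predecessor pointer. Below is a minimal standalone sketch of
the idiom; the ring_t type and helper names are invented for illustration and
are not part of the patch.

#include <stdio.h>

/* Simplified stand-in for the next/ref fields this patch adds to node_t. */
typedef struct ring_t ring_t;
struct ring_t {
    ring_t  *next;  /* next node on the circular list */
    ring_t **ref;   /* address of the 'next' pointer that points at us */
    int      id;
};

/* One-element ring, as apr_pool_create_ex now sets up:
 * node->next = node; node->ref = &node->next;
 */
static void ring_init(ring_t *node)
{
    node->next = node;
    node->ref = &node->next;
}

/* Splice 'node' in directly before 'at'; these are the same four stores
 * that apr_palloc and friends perform inline.
 */
static void ring_insert_before(ring_t *node, ring_t *at)
{
    node->ref = at->ref;   /* whatever pointed at 'at' now points at 'node' */
    *node->ref = node;
    node->next = at;
    at->ref = &node->next;
}

/* Unlink 'node' in O(1); in the patch this appears inline as
 * *node->ref = node->next; node->next->ref = node->ref;
 */
static void ring_remove(ring_t *node)
{
    *node->ref = node->next;
    node->next->ref = node->ref;
}

int main(void)
{
    ring_t a = { 0 }, b = { 0 }, c = { 0 };
    ring_t *p;

    a.id = 1;
    b.id = 2;
    c.id = 3;

    ring_init(&a);
    ring_insert_before(&b, &a);   /* ring: b -> a -> b */
    ring_insert_before(&c, &a);   /* ring: b -> c -> a -> b */
    ring_remove(&c);              /* ring: b -> a -> b */

    p = &a;
    do {
        printf("node %d\n", p->id);   /* prints node 1, then node 2 */
        p = p->next;
    } while (p != &a);

    return 0;
}

The free_index maintained after each splice is the node's remaining capacity
in BOUNDARY_SIZE units (rounded down); the do/while walks keep the ring sorted
by decreasing free_index, so that active->next is always the reusable node
with the most space left, which is the node apr_palloc and psprintf_flush try
before falling back to node_malloc.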