/**************************************************************
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 *************************************************************/



#include "alloc_cache.h"
#include "alloc_impl.h"
#include "alloc_arena.h"
#include "internal/once.h"
#include "sal/macros.h"
#include "osl/diagnose.h"

#ifndef INCLUDED_STRING_H
#include <string.h>
#endif

#ifndef INCLUDED_STDIO_H
#include <stdio.h>
#endif

#ifdef OS2
#undef OSL_TRACE
#define OSL_TRACE                  1 ? ((void)0) : _OSL_GLOBAL osl_trace
#endif

/* ================================================================= *
 *
 * cache internals.
 *
 * ================================================================= */

/** g_cache_list
 *  @internal
 */
struct rtl_cache_list_st
{
	rtl_memory_lock_type m_lock;
	rtl_cache_type       m_cache_head;

#if defined(SAL_UNX) || defined(SAL_OS2)
	pthread_t            m_update_thread;
	pthread_cond_t       m_update_cond;
#elif defined(SAL_W32)
	HANDLE               m_update_thread;
	HANDLE               m_update_cond;
#endif /* SAL_UNX || SAL_OS2 || SAL_W32 */
	int                  m_update_done;
};

static struct rtl_cache_list_st g_cache_list;


/** gp_cache_arena
 *  provided for cache_type allocations, and hash_table resizing.
 *
 *  @internal
 */
static rtl_arena_type * gp_cache_arena = 0;


/** gp_cache_magazine_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_magazine_cache = 0;


/** gp_cache_slab_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_slab_cache = 0;


/** gp_cache_bufctl_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_bufctl_cache = 0;


/** rtl_cache_init()
 *  @internal
 */
static int
rtl_cache_init (void);


/* ================================================================= */

/** RTL_CACHE_HASH_INDEX()
 */
#define	RTL_CACHE_HASH_INDEX_IMPL(a, s, q, m) \
 	((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define	RTL_CACHE_HASH_INDEX(cache, addr) \
    RTL_CACHE_HASH_INDEX_IMPL((addr), (cache)->m_hash_shift, (cache)->m_type_shift, ((cache)->m_hash_size - 1))
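
/* Note: RTL_CACHE_HASH_INDEX() folds two right-shifted copies of the
 * buffer address into itself before masking, so that more address bits
 * influence the bucket; m_type_shift first discards the low bits that
 * carry little information for same-sized buffers.  A worked example,
 * assuming m_hash_shift = 3, m_type_shift = 6 and m_hash_size = 8:
 *
 *   addr = 0x7f40:  0x7f40 + (0x7f40 >> 3) + (0x7f40 >> 6) = 0x9125
 *   (0x9125 >> 6) & (8 - 1) = 0x244 & 7 = 4
 */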


/** rtl_cache_hash_rescale()
 */
static void
rtl_cache_hash_rescale (
	rtl_cache_type * cache,
	sal_Size         new_size
)
{
	rtl_cache_bufctl_type ** new_table;
	sal_Size                 new_bytes;

	new_bytes = new_size * sizeof(rtl_cache_bufctl_type*);
	new_table = (rtl_cache_bufctl_type**)rtl_arena_alloc(gp_cache_arena, &new_bytes);

	if (new_table != 0)
	{
		rtl_cache_bufctl_type ** old_table;
		sal_Size                 old_size, i;

		memset (new_table, 0, new_bytes);

		RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

		old_table = cache->m_hash_table;
		old_size  = cache->m_hash_size;

		OSL_TRACE(
			"rtl_cache_hash_rescale(\"%s\"): "
			"nbuf: %" PRIu64 " (ave: %" PRIu64 "), frees: %" PRIu64 " "
			"[old_size: %lu, new_size: %lu]",
			cache->m_name,
			cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free,
			(cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free) >> cache->m_hash_shift,
			cache->m_slab_stats.m_free,
			old_size, new_size);

		cache->m_hash_table = new_table;
		cache->m_hash_size  = new_size;
		cache->m_hash_shift = highbit(cache->m_hash_size) - 1;

		for (i = 0; i < old_size; i++)
		{
			rtl_cache_bufctl_type * curr = old_table[i];
			while (curr != 0)
			{
				rtl_cache_bufctl_type  * next = curr->m_next;
				rtl_cache_bufctl_type ** head;

				head = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, curr->m_addr)]);
				curr->m_next = (*head);
				(*head) = curr;

				curr = next;
			}
			old_table[i] = 0;
		}

		RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));

		if (old_table != cache->m_hash_table_0)
		{
			sal_Size old_bytes = old_size * sizeof(rtl_cache_bufctl_type*);
			rtl_arena_free (gp_cache_arena, old_table, old_bytes);
		}
	}
}

/** rtl_cache_hash_insert()
 */
static RTL_MEMORY_INLINE sal_uIntPtr
rtl_cache_hash_insert (
	rtl_cache_type *        cache,
	rtl_cache_bufctl_type * bufctl
)
{
	rtl_cache_bufctl_type ** ppHead;

	ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, bufctl->m_addr)]);

	bufctl->m_next = (*ppHead);
	(*ppHead) = bufctl;

	return (bufctl->m_addr);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_hash_insert)
#endif /* __SUNPRO_C */


/** rtl_cache_hash_remove()
 */
static rtl_cache_bufctl_type *
rtl_cache_hash_remove (
	rtl_cache_type * cache,
	sal_uIntPtr      addr
)
{
	rtl_cache_bufctl_type ** ppHead;
	rtl_cache_bufctl_type  * bufctl;
	sal_Size                 lookups = 0;

	ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, addr)]);
	while ((bufctl = *ppHead) != 0)
	{
		if (bufctl->m_addr == addr)
		{
			*ppHead = bufctl->m_next, bufctl->m_next = 0;
			break;
		}

		lookups += 1;
		ppHead = &(bufctl->m_next);
	}

	OSL_ASSERT (bufctl != 0); /* bad free */

	if (lookups > 1)
	{
		sal_Size nbuf = (sal_Size)(cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free);
		if (nbuf > 4 * cache->m_hash_size)
		{
			if (!(cache->m_features & RTL_CACHE_FEATURE_RESCALE))
			{
				sal_Size ave = nbuf >> cache->m_hash_shift;
				sal_Size new_size = cache->m_hash_size << (highbit(ave) - 1);

				cache->m_features |= RTL_CACHE_FEATURE_RESCALE;
				RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
				rtl_cache_hash_rescale (cache, new_size);
				RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
				cache->m_features &= ~RTL_CACHE_FEATURE_RESCALE;
			}
		}
	}

	return (bufctl);
}
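
/* A note on the rescale policy above: a free that needs more than one
 * probe re-checks the table load; once more than 4 live buffers hash to
 * each bucket on average (nbuf > 4 * m_hash_size), the table grows by the
 * average chain length, rounded to a power of two.  For instance, with
 * m_hash_size = 8 (m_hash_shift = 3) and nbuf = 64: ave = 64 >> 3 = 8 and
 * new_size = 8 << (highbit(8) - 1) = 8 << 3 = 64.  The RESCALE feature
 * bit keeps a second thread from resizing concurrently while the slab
 * lock is dropped.
 */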

/* ================================================================= */

/** RTL_CACHE_SLAB()
 */
#define RTL_CACHE_SLAB(addr, size) \
    (((rtl_cache_slab_type*)(RTL_MEMORY_P2END((sal_uIntPtr)(addr), (size)))) - 1)
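
/* Note: for caches without RTL_CACHE_FEATURE_HASH the slab header lives
 * at the tail of the slab memory itself, assuming RTL_MEMORY_P2END()
 * yields the first address past the naturally aligned slab.  E.g. for a
 * slab at addr = 0x10000 of size = 0x1000, the header occupies the last
 * sizeof(rtl_cache_slab_type) bytes below 0x11000, while buffers fill the
 * slab upward from m_data (rtl_cache_activate() reserves the tail when it
 * computes m_ntypes).
 */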


/** rtl_cache_slab_constructor()
 */
static int
rtl_cache_slab_constructor (void * obj, void * arg)
{
	rtl_cache_slab_type * slab = (rtl_cache_slab_type*)(obj);

	(void) arg; /* unused */

	QUEUE_START_NAMED(slab, slab_);
	slab->m_ntypes = 0;

	return (1);
}


/** rtl_cache_slab_destructor()
 */
static void
rtl_cache_slab_destructor (void * obj, void * arg)
{
#if OSL_DEBUG_LEVEL == 0
	(void) obj; /* unused */
#else /* OSL_DEBUG_LEVEL */
	rtl_cache_slab_type * slab = (rtl_cache_slab_type*)(obj);

	/* assure removed from queue(s) */
	OSL_ASSERT(QUEUE_STARTED_NAMED(slab, slab_));

	/* assure no longer referenced */
	OSL_ASSERT(slab->m_ntypes == 0);
#endif /* OSL_DEBUG_LEVEL */

	(void) arg; /* unused */
}


/** rtl_cache_slab_create()
 *
 *  @precond cache->m_slab_lock released.
 */
static rtl_cache_slab_type *
rtl_cache_slab_create (
	rtl_cache_type * cache
)
{
	rtl_cache_slab_type * slab = 0;
	void *                addr;
	sal_Size              size;

	size = cache->m_slab_size;
	addr = rtl_arena_alloc (cache->m_source, &size);
	if (addr != 0)
	{
		OSL_ASSERT(size >= cache->m_slab_size);

		if (cache->m_features & RTL_CACHE_FEATURE_HASH)
		{
			/* allocate slab struct from slab cache */
			OSL_ASSERT (cache != gp_cache_slab_cache);
			slab = (rtl_cache_slab_type*)rtl_cache_alloc (gp_cache_slab_cache);
		}
		else
		{
			/* construct embedded slab struct */
			slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
			(void) rtl_cache_slab_constructor (slab, 0);
		}
		if (slab != 0)
		{
			slab->m_data = (sal_uIntPtr)(addr);

			/* dynamic freelist initialization */
			slab->m_bp = slab->m_data;
			slab->m_sp = 0;
		}
		else
		{
			rtl_arena_free (cache->m_source, addr, size);
		}
	}
	return (slab);
}


/** rtl_cache_slab_destroy()
 *
 *  @precond cache->m_slab_lock released.
 */
static void
rtl_cache_slab_destroy (
	rtl_cache_type *      cache,
	rtl_cache_slab_type * slab
)
{
	void *   addr   = (void*)(slab->m_data);
	sal_Size refcnt = slab->m_ntypes; slab->m_ntypes = 0;

	if (cache->m_features & RTL_CACHE_FEATURE_HASH)
	{
		/* cleanup bufctl(s) for free buffer(s) */
		sal_Size ntypes = (slab->m_bp - slab->m_data) / cache->m_type_size;
		for (ntypes -= refcnt; slab->m_sp != 0; ntypes--)
		{
			rtl_cache_bufctl_type * bufctl = slab->m_sp;

			/* pop from freelist */
			slab->m_sp = bufctl->m_next, bufctl->m_next = 0;

			/* return bufctl struct to bufctl cache */
			rtl_cache_free (gp_cache_bufctl_cache, bufctl);
		}
		OSL_ASSERT(ntypes == 0);

		/* return slab struct to slab cache */
		rtl_cache_free (gp_cache_slab_cache, slab);
	}
	else
	{
		/* destruct embedded slab struct */
		rtl_cache_slab_destructor (slab, 0);
	}

	if ((refcnt == 0) || (cache->m_features & RTL_CACHE_FEATURE_BULKDESTROY))
	{
		/* free memory */
		rtl_arena_free (cache->m_source, addr, cache->m_slab_size);
	}
}


/** rtl_cache_slab_populate()
 *
 *  @precond cache->m_slab_lock acquired.
 */
static int
rtl_cache_slab_populate (
	rtl_cache_type * cache
)
{
	rtl_cache_slab_type * slab;

	RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
	slab = rtl_cache_slab_create (cache);
	RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
	if (slab != 0)
	{
		/* update buffer start addr w/ current color */
		slab->m_bp += cache->m_ncolor;

		/* update color for next slab */
		cache->m_ncolor += cache->m_type_align;
		if (cache->m_ncolor > cache->m_ncolor_max)
			cache->m_ncolor = 0;

		/* update stats */
		cache->m_slab_stats.m_mem_total += cache->m_slab_size;

		/* insert onto 'free' queue */
		QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
	}
	return (slab != 0);
}
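
/* Note on the slab coloring above: each new slab offsets its first buffer
 * by m_ncolor bytes, and m_ncolor advances by m_type_align per slab,
 * wrapping past m_ncolor_max (the slack left after fitting m_ntypes
 * buffers).  With m_type_align = 8 and m_ncolor_max = 24, for example,
 * successive slabs start at offsets 0, 8, 16, 24, 0, ... so that buffers
 * at equal indexes in different slabs do not all map to the same
 * hardware cache lines.
 */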

/* ================================================================= */

/** rtl_cache_slab_alloc()
 *
 *  Allocate a buffer from slab layer; used by magazine layer.
 */
static void *
rtl_cache_slab_alloc (
	rtl_cache_type * cache
)
{
	void                * addr = 0;
	rtl_cache_slab_type * head;

	RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

	head = &(cache->m_free_head);
	if ((head->m_slab_next != head) || rtl_cache_slab_populate (cache))
	{
		rtl_cache_slab_type   * slab;
		rtl_cache_bufctl_type * bufctl;

		slab = head->m_slab_next;
		OSL_ASSERT(slab->m_ntypes < cache->m_ntypes);

		if (slab->m_sp == 0)
		{
			/* initialize bufctl w/ current 'slab->m_bp' */
			OSL_ASSERT (slab->m_bp < slab->m_data + cache->m_ntypes * cache->m_type_size + cache->m_ncolor_max);
			if (cache->m_features & RTL_CACHE_FEATURE_HASH)
			{
				/* allocate bufctl */
				OSL_ASSERT (cache != gp_cache_bufctl_cache);
				bufctl = (rtl_cache_bufctl_type*)rtl_cache_alloc (gp_cache_bufctl_cache);
				if (bufctl == 0)
				{
					/* out of memory */
					RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
					return (0);
				}

				bufctl->m_addr = slab->m_bp;
				bufctl->m_slab = (sal_uIntPtr)(slab);
			}
			else
			{
				/* embedded bufctl */
				bufctl = (rtl_cache_bufctl_type*)(slab->m_bp);
			}
			bufctl->m_next = 0;

			/* update 'slab->m_bp' to next free buffer */
			slab->m_bp += cache->m_type_size;

			/* assign bufctl to freelist */
			slab->m_sp = bufctl;
		}

		/* pop front */
		bufctl = slab->m_sp;
		slab->m_sp = bufctl->m_next;

		/* increment usage, check for full slab */
		if ((slab->m_ntypes += 1) == cache->m_ntypes)
		{
			/* remove from 'free' queue */
			QUEUE_REMOVE_NAMED(slab, slab_);

			/* insert onto 'used' queue (tail) */
			QUEUE_INSERT_TAIL_NAMED(&(cache->m_used_head), slab, slab_);
		}

		/* update stats */
		cache->m_slab_stats.m_alloc     += 1;
		cache->m_slab_stats.m_mem_alloc += cache->m_type_size;

		if (cache->m_features & RTL_CACHE_FEATURE_HASH)
			addr = (void*)rtl_cache_hash_insert (cache, bufctl);
		else
			addr = bufctl;

		/* DEBUG ONLY: mark allocated, undefined */
		OSL_DEBUG_ONLY(memset(addr, 0x77777777, cache->m_type_size));
		VALGRIND_MEMPOOL_ALLOC(cache, addr, cache->m_type_size);
	}

	RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
	return (addr);
}


/** rtl_cache_slab_free()
 *
 *  Return a buffer to slab layer; used by magazine layer.
 */
static void
rtl_cache_slab_free (
	rtl_cache_type * cache,
	void *           addr
)
{
	rtl_cache_bufctl_type * bufctl;
	rtl_cache_slab_type   * slab;

	RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

	/* DEBUG ONLY: mark unallocated, undefined */
	VALGRIND_MEMPOOL_FREE(cache, addr);
	/* OSL_DEBUG_ONLY() */ VALGRIND_MAKE_MEM_UNDEFINED(addr, cache->m_type_size);
	OSL_DEBUG_ONLY(memset(addr, 0x33333333, cache->m_type_size));

	/* determine slab from addr */
	if (cache->m_features & RTL_CACHE_FEATURE_HASH)
	{
		bufctl = rtl_cache_hash_remove (cache, (sal_uIntPtr)(addr));
		slab = (bufctl != 0) ? (rtl_cache_slab_type*)(bufctl->m_slab) : 0;
	}
	else
	{
		/* embedded slab struct */
		bufctl = (rtl_cache_bufctl_type*)(addr);
		slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
	}

	if (slab != 0)
	{
		/* check for full slab */
		if (slab->m_ntypes == cache->m_ntypes)
		{
			/* remove from 'used' queue */
			QUEUE_REMOVE_NAMED(slab, slab_);

			/* insert onto 'free' queue (head) */
			QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
		}

		/* push front */
		bufctl->m_next = slab->m_sp;
		slab->m_sp = bufctl;

		/* update stats */
		cache->m_slab_stats.m_free      += 1;
		cache->m_slab_stats.m_mem_alloc -= cache->m_type_size;

		/* decrement usage, check for empty slab */
		if ((slab->m_ntypes -= 1) == 0)
		{
			/* remove from 'free' queue */
			QUEUE_REMOVE_NAMED(slab, slab_);

			/* update stats */
			cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

			/* free 'empty' slab */
			RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
			rtl_cache_slab_destroy (cache, slab);
			return;
		}
	}

	RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
}

/* ================================================================= */

/** rtl_cache_magazine_constructor()
 */
static int
rtl_cache_magazine_constructor (void * obj, void * arg)
{
	rtl_cache_magazine_type * mag = (rtl_cache_magazine_type*)(obj);
	/* @@@ sal_Size size = (sal_Size)(arg); @@@ */

	(void) arg; /* unused */

	mag->m_mag_next = 0;
	mag->m_mag_size = RTL_CACHE_MAGAZINE_SIZE;
	mag->m_mag_used = 0;

	return (1);
}


/** rtl_cache_magazine_destructor()
 */
static void
rtl_cache_magazine_destructor (void * obj, void * arg)
{
#if OSL_DEBUG_LEVEL == 0
	(void) obj; /* unused */
#else /* OSL_DEBUG_LEVEL */
	rtl_cache_magazine_type * mag = (rtl_cache_magazine_type*)(obj);

	/* assure removed from queue(s) */
	OSL_ASSERT(mag->m_mag_next == 0);

	/* assure no longer referenced */
	OSL_ASSERT(mag->m_mag_used == 0);
#endif /* OSL_DEBUG_LEVEL */

	(void) arg; /* unused */
}


/** rtl_cache_magazine_clear()
 */
static void
rtl_cache_magazine_clear (
	rtl_cache_type *          cache,
	rtl_cache_magazine_type * mag
)
{
	for (; mag->m_mag_used > 0; --mag->m_mag_used)
	{
		void * obj = mag->m_objects[mag->m_mag_used - 1];
		mag->m_objects[mag->m_mag_used - 1] = 0;

		/* DEBUG ONLY: mark cached object allocated, undefined */
		VALGRIND_MEMPOOL_ALLOC(cache, obj, cache->m_type_size);
		if (cache->m_destructor != 0)
		{
			/* DEBUG ONLY: keep constructed object defined */
			VALGRIND_MAKE_MEM_DEFINED(obj, cache->m_type_size);

			/* destruct object */
			(cache->m_destructor)(obj, cache->m_userarg);
		}

		/* return buffer to slab layer */
		rtl_cache_slab_free (cache, obj);
	}
}
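
/* Note: a magazine is a fixed-size array holding up to
 * RTL_CACHE_MAGAZINE_SIZE constructed objects; an object parked in a
 * magazine keeps its constructed state.  The destructor therefore runs in
 * exactly two places in this file: here, when a magazine is drained back
 * into the slab layer, and in rtl_cache_free() when no magazine space is
 * available at all.
 */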

/* ================================================================= */

/** rtl_cache_depot_enqueue()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static RTL_MEMORY_INLINE void
rtl_cache_depot_enqueue (
	rtl_cache_depot_type *    depot,
	rtl_cache_magazine_type * mag
)
{
	/* enqueue empty magazine */
	mag->m_mag_next = depot->m_mag_next;
	depot->m_mag_next = mag;

	/* update depot stats */
	depot->m_mag_count++;
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_enqueue)
#endif /* __SUNPRO_C */


/** rtl_cache_depot_dequeue()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static RTL_MEMORY_INLINE rtl_cache_magazine_type *
rtl_cache_depot_dequeue (
	rtl_cache_depot_type * depot
)
{
	rtl_cache_magazine_type * mag = 0;
	if (depot->m_mag_count > 0)
	{
		/* dequeue magazine */
		OSL_ASSERT(depot->m_mag_next != 0);

		mag = depot->m_mag_next;
		depot->m_mag_next = mag->m_mag_next;
		mag->m_mag_next = 0;

		/* update depot stats */
		depot->m_mag_count--;
		depot->m_curr_min = SAL_MIN(depot->m_curr_min, depot->m_mag_count);
	}
	return (mag);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_dequeue)
#endif /* __SUNPRO_C */


/** rtl_cache_depot_exchange_alloc()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static RTL_MEMORY_INLINE rtl_cache_magazine_type *
rtl_cache_depot_exchange_alloc (
	rtl_cache_type *          cache,
	rtl_cache_magazine_type * empty
)
{
	rtl_cache_magazine_type * full;

	OSL_ASSERT((empty == 0) || (empty->m_mag_used == 0));

	/* dequeue full magazine */
	full = rtl_cache_depot_dequeue (&(cache->m_depot_full));
	if ((full != 0) && (empty != 0))
	{
		/* enqueue empty magazine */
		rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
	}

	OSL_ASSERT((full == 0) || (full->m_mag_used > 0));

	return (full);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_exchange_alloc)
#endif /* __SUNPRO_C */


/** rtl_cache_depot_exchange_free()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static RTL_MEMORY_INLINE rtl_cache_magazine_type *
rtl_cache_depot_exchange_free (
	rtl_cache_type *          cache,
	rtl_cache_magazine_type * full
)
{
	rtl_cache_magazine_type * empty;

	OSL_ASSERT((full == 0) || (full->m_mag_used > 0));

	/* dequeue empty magazine */
	empty = rtl_cache_depot_dequeue (&(cache->m_depot_empty));
	if ((empty != 0) && (full != 0))
	{
		/* enqueue full magazine */
		rtl_cache_depot_enqueue (&(cache->m_depot_full), full);
	}

	OSL_ASSERT((empty == 0) || (empty->m_mag_used == 0));

	return (empty);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_exchange_free)
#endif /* __SUNPRO_C */
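
/* Note: the two exchange operations above form the depot half of the
 * magazine protocol: an allocating thread trades its empty magazine for a
 * full one, a freeing thread trades its full magazine for an empty one.
 * Each is a dequeue plus a conditional enqueue under m_depot_lock, so any
 * magazine is wholly owned either by the depot or by the cpu layer, never
 * shared between the two.
 */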


/** rtl_cache_depot_populate()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static int
rtl_cache_depot_populate (
	rtl_cache_type * cache
)
{
	rtl_cache_magazine_type * empty = 0;

	if (cache->m_magazine_cache != 0)
	{
		/* allocate new empty magazine */
		RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
		empty = (rtl_cache_magazine_type*)rtl_cache_alloc (cache->m_magazine_cache);
		RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
		if (empty != 0)
		{
			/* enqueue (new) empty magazine */
			rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
		}
	}
	return (empty != 0);
}

/* ================================================================= */

/** rtl_cache_constructor()
 */
static int
rtl_cache_constructor (void * obj)
{
	rtl_cache_type * cache = (rtl_cache_type*)(obj);

	memset (cache, 0, sizeof(rtl_cache_type));

	/* linkage */
	QUEUE_START_NAMED(cache, cache_);

	/* slab layer */
	(void)RTL_MEMORY_LOCK_INIT(&(cache->m_slab_lock));

	QUEUE_START_NAMED(&(cache->m_free_head), slab_);
	QUEUE_START_NAMED(&(cache->m_used_head), slab_);

	cache->m_hash_table = cache->m_hash_table_0;
	cache->m_hash_size  = RTL_CACHE_HASH_SIZE;
	cache->m_hash_shift = highbit(cache->m_hash_size) - 1;

	/* depot layer */
	(void)RTL_MEMORY_LOCK_INIT(&(cache->m_depot_lock));

	return (1);
}

/** rtl_cache_destructor()
 */
static void
rtl_cache_destructor (void * obj)
{
	rtl_cache_type * cache = (rtl_cache_type*)(obj);

	/* linkage */
	OSL_ASSERT(QUEUE_STARTED_NAMED(cache, cache_));

	/* slab layer */
	(void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_slab_lock));

	OSL_ASSERT(QUEUE_STARTED_NAMED(&(cache->m_free_head), slab_));
	OSL_ASSERT(QUEUE_STARTED_NAMED(&(cache->m_used_head), slab_));

	OSL_ASSERT(cache->m_hash_table == cache->m_hash_table_0);
	OSL_ASSERT(cache->m_hash_size  == RTL_CACHE_HASH_SIZE);
	OSL_ASSERT(cache->m_hash_shift == (sal_Size)(highbit(cache->m_hash_size) - 1));

	/* depot layer */
	(void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_depot_lock));
}

/* ================================================================= */

/** rtl_cache_activate()
 */
static rtl_cache_type *
rtl_cache_activate (
    rtl_cache_type * cache,
    const char *     name,
    size_t           objsize,
    size_t           objalign,
    int  (SAL_CALL * constructor)(void * obj, void * userarg),
    void (SAL_CALL * destructor) (void * obj, void * userarg),
    void (SAL_CALL * reclaim)    (void * userarg),
    void *           userarg,
    rtl_arena_type * source,
    int              flags
)
{
	OSL_ASSERT(cache != 0);
	if (cache != 0)
	{
		sal_Size slabsize;

		snprintf (cache->m_name, sizeof(cache->m_name), "%s", name);

		/* ensure minimum size (embedded bufctl linkage) */
		objsize = SAL_MAX(objsize, sizeof(rtl_cache_bufctl_type*));

		if (objalign == 0)
		{
			/* determine default alignment */
			if (objsize >= RTL_MEMORY_ALIGNMENT_8)
				objalign = RTL_MEMORY_ALIGNMENT_8;
			else
				objalign = RTL_MEMORY_ALIGNMENT_4;
		}
		else
		{
			/* ensure minimum alignment */
			objalign = SAL_MAX(objalign, RTL_MEMORY_ALIGNMENT_4);
		}
		OSL_ASSERT(RTL_MEMORY_ISP2(objalign));

		cache->m_type_size  = objsize = RTL_MEMORY_P2ROUNDUP(objsize, objalign);
		cache->m_type_align = objalign;
		cache->m_type_shift = highbit(cache->m_type_size) - 1;

		cache->m_constructor = constructor;
		cache->m_destructor  = destructor;
		cache->m_reclaim     = reclaim;
		cache->m_userarg     = userarg;

		/* slab layer */
		cache->m_source = source;

		slabsize = source->m_quantum; /* minimum slab size */
		if (flags & RTL_CACHE_FLAG_QUANTUMCACHE)
		{
			/* next power of 2 above 3 * qcache_max */
			slabsize = SAL_MAX(slabsize, (1UL << highbit(3 * source->m_qcache_max)));
		}
		else
		{
			/* waste at most 1/8 of slab */
			slabsize = SAL_MAX(slabsize, cache->m_type_size * 8);
		}

		slabsize = RTL_MEMORY_P2ROUNDUP(slabsize, source->m_quantum);
		if (!RTL_MEMORY_ISP2(slabsize))
			slabsize = 1UL << highbit(slabsize);
		cache->m_slab_size = slabsize;

		if (cache->m_slab_size > source->m_quantum)
		{
			OSL_ASSERT(gp_cache_slab_cache != 0);
			OSL_ASSERT(gp_cache_bufctl_cache != 0);

			cache->m_features  |= RTL_CACHE_FEATURE_HASH;
			cache->m_ntypes     = cache->m_slab_size / cache->m_type_size;
			cache->m_ncolor_max = cache->m_slab_size % cache->m_type_size;
		}
		else
		{
			/* embedded slab struct */
			cache->m_ntypes     = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) / cache->m_type_size;
			cache->m_ncolor_max = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) % cache->m_type_size;
		}

		OSL_ASSERT(cache->m_ntypes > 0);
		cache->m_ncolor = 0;

		if (flags & RTL_CACHE_FLAG_BULKDESTROY)
		{
			/* allow bulk slab delete upon cache deactivation */
			cache->m_features |= RTL_CACHE_FEATURE_BULKDESTROY;
		}

		/* magazine layer */
		if (!(flags & RTL_CACHE_FLAG_NOMAGAZINE))
		{
			OSL_ASSERT(gp_cache_magazine_cache != 0);
			cache->m_magazine_cache = gp_cache_magazine_cache;
		}

		/* insert into cache list */
		RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
		QUEUE_INSERT_TAIL_NAMED(&(g_cache_list.m_cache_head), cache, cache_);
		RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
	}
	return (cache);
}
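
/* A worked example for the slab sizing above (values assumed for
 * illustration): objsize = 192, objalign = 8 on a source arena with
 * m_quantum = 4096 and no quantum caching gives
 *
 *   slabsize = SAL_MAX(4096, 192 * 8) = 4096
 *
 * which equals m_quantum, so the slab struct is embedded:
 *
 *   m_ntypes     = (4096 - sizeof(rtl_cache_slab_type)) / 192
 *   m_ncolor_max = (4096 - sizeof(rtl_cache_slab_type)) % 192
 *
 * A larger objsize pushes slabsize past m_quantum and enables
 * RTL_CACHE_FEATURE_HASH, i.e. external slab and bufctl bookkeeping.
 */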

/** rtl_cache_deactivate()
 */
static void
rtl_cache_deactivate (
    rtl_cache_type * cache
)
{
	int active = 1;

	/* remove from cache list */
	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	active = QUEUE_STARTED_NAMED(cache, cache_) == 0;
	QUEUE_REMOVE_NAMED(cache, cache_);
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

	OSL_PRECOND(active, "rtl_cache_deactivate(): orphaned cache.");

	/* cleanup magazine layer */
	if (cache->m_magazine_cache != 0)
	{
		rtl_cache_type *          mag_cache;
		rtl_cache_magazine_type * mag;

		/* prevent recursion */
		mag_cache = cache->m_magazine_cache, cache->m_magazine_cache = 0;

		/* cleanup cpu layer */
		if ((mag = cache->m_cpu_curr) != 0)
		{
			cache->m_cpu_curr = 0;
			rtl_cache_magazine_clear (cache, mag);
			rtl_cache_free (mag_cache, mag);
		}
		if ((mag = cache->m_cpu_prev) != 0)
		{
			cache->m_cpu_prev = 0;
			rtl_cache_magazine_clear (cache, mag);
			rtl_cache_free (mag_cache, mag);
		}

		/* cleanup depot layer */
		while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_full))) != 0)
		{
			rtl_cache_magazine_clear (cache, mag);
			rtl_cache_free (mag_cache, mag);
		}
		while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_empty))) != 0)
		{
			rtl_cache_magazine_clear (cache, mag);
			rtl_cache_free (mag_cache, mag);
		}
	}

	OSL_TRACE(
		"rtl_cache_deactivate(\"%s\"): "
		"[slab]: allocs: %"PRIu64", frees: %"PRIu64"; total: %lu, used: %lu; "
		"[cpu]: allocs: %"PRIu64", frees: %"PRIu64"; "
		"[total]: allocs: %"PRIu64", frees: %"PRIu64"",
		cache->m_name,
		cache->m_slab_stats.m_alloc, cache->m_slab_stats.m_free,
		cache->m_slab_stats.m_mem_total, cache->m_slab_stats.m_mem_alloc,
		cache->m_cpu_stats.m_alloc, cache->m_cpu_stats.m_free,
		cache->m_slab_stats.m_alloc + cache->m_cpu_stats.m_alloc,
		cache->m_slab_stats.m_free  + cache->m_cpu_stats.m_free
	);

	/* cleanup slab layer */
	if (cache->m_slab_stats.m_alloc > cache->m_slab_stats.m_free)
	{
		OSL_TRACE(
			"rtl_cache_deactivate(\"%s\"): "
			"cleaning up %"PRIu64" leaked buffer(s) [%lu bytes] [%lu total]",
			cache->m_name,
			cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free,
			cache->m_slab_stats.m_mem_alloc, cache->m_slab_stats.m_mem_total
		);

		if (cache->m_features & RTL_CACHE_FEATURE_HASH)
		{
			/* cleanup bufctl(s) for leaking buffer(s) */
			sal_Size i, n = cache->m_hash_size;
			for (i = 0; i < n; i++)
			{
				rtl_cache_bufctl_type * bufctl;
				while ((bufctl = cache->m_hash_table[i]) != 0)
				{
					/* pop from hash table */
					cache->m_hash_table[i] = bufctl->m_next, bufctl->m_next = 0;

					/* return to bufctl cache */
					rtl_cache_free (gp_cache_bufctl_cache, bufctl);
				}
			}
		}
		{
			/* force cleanup of remaining slabs */
			rtl_cache_slab_type *head, *slab;

			head = &(cache->m_used_head);
			for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
			{
				/* remove from 'used' queue */
				QUEUE_REMOVE_NAMED(slab, slab_);

				/* update stats */
				cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

				/* free slab */
				rtl_cache_slab_destroy (cache, slab);
			}

			head = &(cache->m_free_head);
			for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
			{
				/* remove from 'free' queue */
				QUEUE_REMOVE_NAMED(slab, slab_);

				/* update stats */
				cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

				/* free slab */
				rtl_cache_slab_destroy (cache, slab);
			}
		}
	}

	if (cache->m_hash_table != cache->m_hash_table_0)
	{
		rtl_arena_free (
			gp_cache_arena,
			cache->m_hash_table,
			cache->m_hash_size * sizeof(rtl_cache_bufctl_type*));

		cache->m_hash_table = cache->m_hash_table_0;
		cache->m_hash_size  = RTL_CACHE_HASH_SIZE;
		cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
	}
}

/* ================================================================= *
 *
 * cache implementation.
 *
 * ================================================================= */

/** rtl_cache_create()
 */
rtl_cache_type *
SAL_CALL rtl_cache_create (
    const char *     name,
    sal_Size         objsize,
    sal_Size         objalign,
    int  (SAL_CALL * constructor)(void * obj, void * userarg),
    void (SAL_CALL * destructor) (void * obj, void * userarg),
    void (SAL_CALL * reclaim)    (void * userarg),
    void *           userarg,
    rtl_arena_type * source,
    int              flags
) SAL_THROW_EXTERN_C()
{
	rtl_cache_type * result = 0;
	sal_Size         size   = sizeof(rtl_cache_type);

try_alloc:
	result = (rtl_cache_type*)rtl_arena_alloc (gp_cache_arena, &size);
	if (result != 0)
	{
		rtl_cache_type * cache = result;
		VALGRIND_CREATE_MEMPOOL(cache, 0, 0);
		(void) rtl_cache_constructor (cache);

		if (!source)
		{
			/* use default arena */
			OSL_ASSERT(gp_default_arena != 0);
			source = gp_default_arena;
		}

		result = rtl_cache_activate (
			cache,
			name,
			objsize,
			objalign,
			constructor,
			destructor,
			reclaim,
			userarg,
			source,
			flags
		);

		if (result == 0)
		{
			/* activation failed */
			rtl_cache_deactivate (cache);
			rtl_cache_destructor (cache);
			VALGRIND_DESTROY_MEMPOOL(cache);
			rtl_arena_free (gp_cache_arena, cache, size);
		}
	}
	else if (gp_cache_arena == 0)
	{
		if (rtl_cache_init())
		{
			/* try again */
			goto try_alloc;
		}
	}
	return (result);
}
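
/* Illustrative usage only (my_type is a placeholder, not part of this
 * file): a client creates one cache per object type and pairs each
 * allocation with a free.  Passing 0 for constructor, destructor,
 * reclaim, userarg and source selects no callbacks and the default
 * arena:
 *
 *   rtl_cache_type * cache =
 *       rtl_cache_create ("demo_cache", sizeof(my_type), 0,
 *                         0, 0, 0, 0, 0, 0);
 *   my_type * p = (my_type*)rtl_cache_alloc (cache);
 *   ...
 *   rtl_cache_free (cache, p);
 *   rtl_cache_destroy (cache);
 */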

/** rtl_cache_destroy()
 */
void SAL_CALL rtl_cache_destroy (
    rtl_cache_type * cache
) SAL_THROW_EXTERN_C()
{
	if (cache != 0)
	{
		rtl_cache_deactivate (cache);
		rtl_cache_destructor (cache);
		VALGRIND_DESTROY_MEMPOOL(cache);
		rtl_arena_free (gp_cache_arena, cache, sizeof(rtl_cache_type));
	}
}

/** rtl_cache_alloc()
 */
void *
SAL_CALL rtl_cache_alloc (
    rtl_cache_type * cache
) SAL_THROW_EXTERN_C()
{
	void * obj = 0;

	if (cache == 0)
		return (0);

	if (cache->m_cpu_curr != 0)
	{
		RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

		for (;;)
		{
			/* take object from magazine layer */
			rtl_cache_magazine_type *curr, *prev, *temp;

			curr = cache->m_cpu_curr;
			if ((curr != 0) && (curr->m_mag_used > 0))
			{
				obj = curr->m_objects[--curr->m_mag_used];
#if defined(HAVE_VALGRIND_MEMCHECK_H)
				VALGRIND_MEMPOOL_ALLOC(cache, obj, cache->m_type_size);
				if (cache->m_constructor != 0)
				{
					/* keep constructed object defined */
					VALGRIND_MAKE_MEM_DEFINED(obj, cache->m_type_size);
				}
#endif /* HAVE_VALGRIND_MEMCHECK_H */
				cache->m_cpu_stats.m_alloc += 1;
				RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

				return (obj);
			}

			prev = cache->m_cpu_prev;
			if ((prev != 0) && (prev->m_mag_used > 0))
			{
				temp = cache->m_cpu_curr;
				cache->m_cpu_curr = cache->m_cpu_prev;
				cache->m_cpu_prev = temp;

				continue;
			}

			temp = rtl_cache_depot_exchange_alloc (cache, prev);
			if (temp != 0)
			{
				cache->m_cpu_prev = cache->m_cpu_curr;
				cache->m_cpu_curr = temp;

				continue;
			}

			/* no full magazine: fall through to slab layer */
			break;
		}

		RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
	}

	/* alloc buffer from slab layer */
	obj = rtl_cache_slab_alloc (cache);
	if ((obj != 0) && (cache->m_constructor != 0))
	{
		/* construct object */
		if (!((cache->m_constructor)(obj, cache->m_userarg)))
		{
			/* construction failure */
			rtl_cache_slab_free (cache, obj), obj = 0;
		}
	}
	return (obj);
}
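
/* Note: the loop above is the classic two-magazine cpu layer: serve from
 * m_cpu_curr while it holds objects, swap curr/prev when the previous
 * magazine still holds some, and otherwise trade the (empty) previous
 * magazine to the depot for a full one.  Only when the depot has no full
 * magazine either does the allocation fall through to the slab layer,
 * where the constructor runs on the fresh buffer.  rtl_cache_free()
 * below mirrors this exactly with full/empty roles reversed.
 */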

/** rtl_cache_free()
 */
void
SAL_CALL rtl_cache_free (
    rtl_cache_type * cache,
    void *           obj
) SAL_THROW_EXTERN_C()
{
	if ((obj != 0) && (cache != 0))
	{
		RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

		for (;;)
		{
			/* return object to magazine layer */
			rtl_cache_magazine_type *curr, *prev, *temp;

			curr = cache->m_cpu_curr;
			if ((curr != 0) && (curr->m_mag_used < curr->m_mag_size))
			{
				curr->m_objects[curr->m_mag_used++] = obj;
#if defined(HAVE_VALGRIND_MEMCHECK_H)
				VALGRIND_MEMPOOL_FREE(cache, obj);
#endif /* HAVE_VALGRIND_MEMCHECK_H */
				cache->m_cpu_stats.m_free += 1;
				RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

				return;
			}

			prev = cache->m_cpu_prev;
			if ((prev != 0) && (prev->m_mag_used == 0))
			{
				temp = cache->m_cpu_curr;
				cache->m_cpu_curr = cache->m_cpu_prev;
				cache->m_cpu_prev = temp;

				continue;
			}

			temp = rtl_cache_depot_exchange_free (cache, prev);
			if (temp != 0)
			{
				cache->m_cpu_prev = cache->m_cpu_curr;
				cache->m_cpu_curr = temp;

				continue;
			}

			if (rtl_cache_depot_populate(cache) != 0)
			{
				continue;
			}

			/* no empty magazine: fall through to slab layer */
			break;
		}

		RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

		/* no space for constructed object in magazine layer */
		if (cache->m_destructor != 0)
		{
			/* destruct object */
			(cache->m_destructor)(obj, cache->m_userarg);
		}

		/* return buffer to slab layer */
		rtl_cache_slab_free (cache, obj);
	}
}

/* ================================================================= *
 *
 * cache wsupdate (machdep) internals.
 *
 * ================================================================= */

/** rtl_cache_wsupdate_init()
 *
 *  @precond g_cache_list.m_lock initialized
 */
static void
rtl_cache_wsupdate_init (void);


/** rtl_cache_wsupdate_wait()
 *
 *  @precond g_cache_list.m_lock acquired
 */
static void
rtl_cache_wsupdate_wait (
	unsigned int seconds
);

/** rtl_cache_wsupdate_fini()
 *
 */
static void
rtl_cache_wsupdate_fini (void);

/* ================================================================= */

#if defined(SAL_UNX) || defined(SAL_OS2)

#include <sys/time.h>

static void *
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init (void)
{
	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	g_cache_list.m_update_done = 0;
	(void) pthread_cond_init (&(g_cache_list.m_update_cond), NULL);
	if (pthread_create (
			&(g_cache_list.m_update_thread), NULL, rtl_cache_wsupdate_all, (void*)(10)) != 0)
	{
		/* failure */
		g_cache_list.m_update_thread = (pthread_t)(0);
	}
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
	if (seconds > 0)
	{
		struct timeval  now;
		struct timespec wakeup;

		gettimeofday(&now, 0);
		wakeup.tv_sec  = now.tv_sec + (seconds);
		wakeup.tv_nsec = now.tv_usec * 1000;

		(void) pthread_cond_timedwait (
			&(g_cache_list.m_update_cond),
			&(g_cache_list.m_lock),
			&wakeup);
	}
}

static void
rtl_cache_wsupdate_fini (void)
{
	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	g_cache_list.m_update_done = 1;
	pthread_cond_signal (&(g_cache_list.m_update_cond));
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

	if (g_cache_list.m_update_thread != (pthread_t)(0))
		pthread_join (g_cache_list.m_update_thread, NULL);
}

/* ================================================================= */

#elif defined(SAL_W32)

static DWORD WINAPI
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init (void)
{
	DWORD dwThreadId;

	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	g_cache_list.m_update_done = 0;
	g_cache_list.m_update_cond = CreateEvent (0, TRUE, FALSE, 0);

	g_cache_list.m_update_thread =
		CreateThread (NULL, 0, rtl_cache_wsupdate_all, (LPVOID)(10), 0, &dwThreadId);
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
	if (seconds > 0)
	{
		RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
		WaitForSingleObject (g_cache_list.m_update_cond, (DWORD)(seconds * 1000));
		RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	}
}

static void
rtl_cache_wsupdate_fini (void)
{
	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	g_cache_list.m_update_done = 1;
	SetEvent (g_cache_list.m_update_cond);
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

	WaitForSingleObject (g_cache_list.m_update_thread, INFINITE);
}
#endif /* SAL_UNX || SAL_OS2 || SAL_W32 */

/* ================================================================= */

/** rtl_cache_depot_wsupdate()
 *  update depot stats and purge excess magazines.
 *
 *  @precond cache->m_depot_lock acquired
 */
static void
rtl_cache_depot_wsupdate (
	rtl_cache_type *       cache,
	rtl_cache_depot_type * depot
)
{
	sal_Size npurge;

	depot->m_prev_min = depot->m_curr_min;
	depot->m_curr_min = depot->m_mag_count;

	npurge = SAL_MIN(depot->m_curr_min, depot->m_prev_min);
	for (; npurge > 0; npurge--)
	{
		rtl_cache_magazine_type * mag = rtl_cache_depot_dequeue (depot);
		if (mag != 0)
		{
			RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
			rtl_cache_magazine_clear (cache, mag);
			rtl_cache_free (cache->m_magazine_cache, mag);
			RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
		}
	}
}
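
/* Note: rtl_cache_depot_dequeue() maintains m_curr_min as the low-water
 * mark of enqueued magazines since the last update; magazines below both
 * the current count and the previous interval's floor sat unused for a
 * whole update interval and are purged as working-set excess.  E.g. a
 * floor of 2 with 5 magazines now enqueued purges SAL_MIN(5, 2) = 2 of
 * them.
 */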

/** rtl_cache_wsupdate()
 *
 *  @precond cache->m_depot_lock released
 */
static void
rtl_cache_wsupdate (
	rtl_cache_type * cache
)
{
	if (cache->m_magazine_cache != 0)
	{
		RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

		OSL_TRACE(
			"rtl_cache_wsupdate(\"%s\") "
			"[depot: count, curr_min, prev_min] "
			"full: %lu, %lu, %lu; empty: %lu, %lu, %lu",
			cache->m_name,
			cache->m_depot_full.m_mag_count,
			cache->m_depot_full.m_curr_min,
			cache->m_depot_full.m_prev_min,
			cache->m_depot_empty.m_mag_count,
			cache->m_depot_empty.m_curr_min,
			cache->m_depot_empty.m_prev_min
		);

		rtl_cache_depot_wsupdate (cache, &(cache->m_depot_full));
		rtl_cache_depot_wsupdate (cache, &(cache->m_depot_empty));

		RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
	}
}

/** rtl_cache_wsupdate_all()
 *
 */
#if defined(SAL_UNX) || defined(SAL_OS2)
static void *
#elif defined(SAL_W32)
static DWORD WINAPI
#endif /* SAL_UNX || SAL_OS2 || SAL_W32 */
rtl_cache_wsupdate_all (void * arg)
{
	unsigned int seconds = (unsigned int)SAL_INT_CAST(sal_uIntPtr, arg);

	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	while (!g_cache_list.m_update_done)
	{
		rtl_cache_wsupdate_wait (seconds);
		if (!g_cache_list.m_update_done)
		{
			rtl_cache_type * head, * cache;

			head = &(g_cache_list.m_cache_head);
			for (cache  = head->m_cache_next;
				 cache != head;
				 cache  = cache->m_cache_next)
			{
				rtl_cache_wsupdate (cache);
			}
		}
	}
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

	return (0);
}

/* ================================================================= *
 *
 * cache initialization.
 *
 * ================================================================= */

static void
rtl_cache_once_init (void)
{
	{
		/* list of caches */
		RTL_MEMORY_LOCK_INIT(&(g_cache_list.m_lock));
		(void) rtl_cache_constructor (&(g_cache_list.m_cache_head));
	}
	{
		/* cache: internal arena */
		OSL_ASSERT(gp_cache_arena == 0);

		gp_cache_arena = rtl_arena_create (
			"rtl_cache_internal_arena",
			64,   /* quantum */
			0,    /* no quantum caching */
			NULL, /* default source */
			rtl_arena_alloc,
			rtl_arena_free,
			0     /* flags */
		);
		OSL_ASSERT(gp_cache_arena != 0);

		/* check 'gp_default_arena' initialization */
		OSL_ASSERT(gp_default_arena != 0);
	}
	{
		/* cache: magazine cache */
		static rtl_cache_type g_cache_magazine_cache;

		OSL_ASSERT(gp_cache_magazine_cache == 0);
		VALGRIND_CREATE_MEMPOOL(&g_cache_magazine_cache, 0, 0);
		(void) rtl_cache_constructor (&g_cache_magazine_cache);

		gp_cache_magazine_cache = rtl_cache_activate (
			&g_cache_magazine_cache,
			"rtl_cache_magazine_cache",
			sizeof(rtl_cache_magazine_type), /* objsize  */
			0,                               /* objalign */
			rtl_cache_magazine_constructor,
			rtl_cache_magazine_destructor,
			0, /* reclaim */
			0, /* userarg: NYI */
			gp_default_arena, /* source */
			RTL_CACHE_FLAG_NOMAGAZINE /* during bootstrap; activated below */
		);
		OSL_ASSERT(gp_cache_magazine_cache != 0);

		/* activate magazine layer */
		g_cache_magazine_cache.m_magazine_cache = gp_cache_magazine_cache;
	}
	{
		/* cache: slab (struct) cache */
		static rtl_cache_type g_cache_slab_cache;

		OSL_ASSERT(gp_cache_slab_cache == 0);
		VALGRIND_CREATE_MEMPOOL(&g_cache_slab_cache, 0, 0);
		(void) rtl_cache_constructor (&g_cache_slab_cache);

		gp_cache_slab_cache = rtl_cache_activate (
			&g_cache_slab_cache,
			"rtl_cache_slab_cache",
			sizeof(rtl_cache_slab_type), /* objsize  */
			0,                           /* objalign */
			rtl_cache_slab_constructor,
			rtl_cache_slab_destructor,
			0,                           /* reclaim */
			0,                           /* userarg: none */
			gp_default_arena,            /* source */
			0                            /* flags: none */
		);
		OSL_ASSERT(gp_cache_slab_cache != 0);
	}
	{
		/* cache: bufctl cache */
		static rtl_cache_type g_cache_bufctl_cache;

		OSL_ASSERT(gp_cache_bufctl_cache == 0);
		VALGRIND_CREATE_MEMPOOL(&g_cache_bufctl_cache, 0, 0);
		(void) rtl_cache_constructor (&g_cache_bufctl_cache);

		gp_cache_bufctl_cache = rtl_cache_activate (
			&g_cache_bufctl_cache,
			"rtl_cache_bufctl_cache",
			sizeof(rtl_cache_bufctl_type), /* objsize */
			0,                             /* objalign  */
			0,                /* constructor */
			0,                /* destructor */
			0,                /* reclaim */
			0,                /* userarg */
			gp_default_arena, /* source */
			0                 /* flags: none */
		);
		OSL_ASSERT(gp_cache_bufctl_cache != 0);
	}

	rtl_cache_wsupdate_init();
}
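
/* Note on the bootstrap order above: the internal arena must exist before
 * any rtl_cache_type can be carved out of it; the magazine cache is
 * activated with RTL_CACHE_FLAG_NOMAGAZINE and only then given itself as
 * its own magazine source, breaking the self-reference; and the slab and
 * bufctl caches must exist before any cache large enough for
 * RTL_CACHE_FEATURE_HASH is activated (see rtl_cache_slab_create()).
 */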

static int
rtl_cache_init (void)
{
	static sal_once_type g_once = SAL_ONCE_INIT;
	SAL_ONCE(&g_once, rtl_cache_once_init);
	return (gp_cache_arena != 0);
}

/* ================================================================= */

/*
  Issue http://udk.openoffice.org/issues/show_bug.cgi?id=92388

  Mac OS X does not seem to support "__cxa__atexit", thus leading
  to the situation that "__attribute__((destructor))__" functions
  (in particular "rtl_{memory|cache|arena}_fini") become called
  _before_ global C++ object d'tors.

  Delegated the call to "rtl_cache_fini()" into a dummy C++ object,
  see alloc_fini.cxx .
*/
#if defined(__GNUC__) && !defined(MACOSX)
static void rtl_cache_fini (void) __attribute__((destructor));
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma fini(rtl_cache_fini)
static void rtl_cache_fini (void);
#endif /* __GNUC__ || __SUNPRO_C */

void
rtl_cache_fini (void)
{
	if (gp_cache_arena != 0)
	{
		rtl_cache_type * cache, * head;

		rtl_cache_wsupdate_fini();

		if (gp_cache_bufctl_cache != 0)
		{
			cache = gp_cache_bufctl_cache, gp_cache_bufctl_cache = 0;
			rtl_cache_deactivate (cache);
			rtl_cache_destructor (cache);
			VALGRIND_DESTROY_MEMPOOL(cache);
		}
		if (gp_cache_slab_cache != 0)
		{
			cache = gp_cache_slab_cache, gp_cache_slab_cache = 0;
			rtl_cache_deactivate (cache);
			rtl_cache_destructor (cache);
			VALGRIND_DESTROY_MEMPOOL(cache);
		}
		if (gp_cache_magazine_cache != 0)
		{
			cache = gp_cache_magazine_cache, gp_cache_magazine_cache = 0;
			rtl_cache_deactivate (cache);
			rtl_cache_destructor (cache);
			VALGRIND_DESTROY_MEMPOOL(cache);
		}
		if (gp_cache_arena != 0)
		{
			rtl_arena_destroy (gp_cache_arena);
			gp_cache_arena = 0;
		}

		RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
		head = &(g_cache_list.m_cache_head);
		for (cache = head->m_cache_next; cache != head; cache = cache->m_cache_next)
		{
			OSL_TRACE(
				"rtl_cache_fini(\"%s\") "
				"[slab]: allocs: %"PRIu64", frees: %"PRIu64"; total: %lu, used: %lu; "
				"[cpu]: allocs: %"PRIu64", frees: %"PRIu64"; "
				"[total]: allocs: %"PRIu64", frees: %"PRIu64"",
				cache->m_name,
				cache->m_slab_stats.m_alloc, cache->m_slab_stats.m_free,
				cache->m_slab_stats.m_mem_total, cache->m_slab_stats.m_mem_alloc,
				cache->m_cpu_stats.m_alloc, cache->m_cpu_stats.m_free,
				cache->m_slab_stats.m_alloc + cache->m_cpu_stats.m_alloc,
				cache->m_slab_stats.m_free + cache->m_cpu_stats.m_free
			);
		}
		RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
	}
}

/* ================================================================= */