/**************************************************************
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 *************************************************************/

#include "alloc_cache.h"
#include "alloc_impl.h"
#include "alloc_arena.h"
#include "internal/once.h"
#include "sal/macros.h"
#include "osl/diagnose.h"

#ifndef INCLUDED_STRING_H
#include <string.h>
#endif

#ifndef INCLUDED_STDIO_H
#include <stdio.h>
#endif

#ifdef OS2
#undef OSL_TRACE
#define OSL_TRACE                  1 ? ((void)0) : _OSL_GLOBAL osl_trace

#define INCL_DOS
#include <os2.h>

#endif

/* ================================================================= *
 *
 * cache internals.
 *
 * ================================================================= */

/** g_cache_list
 *  @internal
 */
struct rtl_cache_list_st
{
	rtl_memory_lock_type m_lock;
	rtl_cache_type       m_cache_head;

#if defined(SAL_UNX)
	pthread_t            m_update_thread;
	pthread_cond_t       m_update_cond;
#elif defined(SAL_OS2)
	TID                  m_update_thread;
	HEV                  m_update_cond;
#elif defined(SAL_W32)
	HANDLE               m_update_thread;
	HANDLE               m_update_cond;
#endif /* SAL_UNX || SAL_OS2 || SAL_W32 */
	int                  m_update_done;
};

static struct rtl_cache_list_st g_cache_list;

/** gp_cache_arena
 *  provided for cache_type allocations, and hash_table resizing.
 *
 *  @internal
 */
static rtl_arena_type * gp_cache_arena = NULL;

/** gp_cache_magazine_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_magazine_cache = NULL;

/** gp_cache_slab_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_slab_cache = NULL;

/** gp_cache_bufctl_cache
 *  @internal
 */
static rtl_cache_type * gp_cache_bufctl_cache = NULL;

/** rtl_cache_init()
 *  @internal
 */
static int
rtl_cache_init (void);

/* ================================================================= */

/** RTL_CACHE_HASH_INDEX()
 */
#define	RTL_CACHE_HASH_INDEX_IMPL(a, s, q, m) \
	((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define	RTL_CACHE_HASH_INDEX(cache, addr) \
	RTL_CACHE_HASH_INDEX_IMPL((addr), (cache)->m_hash_shift, (cache)->m_type_shift, ((cache)->m_hash_size - 1))
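
/*
 * Illustrative note (not part of the original file): with s = m_hash_shift,
 * q = m_type_shift and m = m_hash_size - 1, a buffer address 'a' is spread
 * over the table as ((a + (a >> s) + (a >> 2*s)) >> q) & m.  Shifting by
 * m_type_shift strips the (power-of-2 rounded) object size, and folding in
 * two shifted copies of the address mixes higher-order bits into the index.
 */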

/** rtl_cache_hash_rescale()
 */
static void
rtl_cache_hash_rescale (
	rtl_cache_type * cache,
	sal_Size         new_size
)
{
	rtl_cache_bufctl_type ** new_table;
	sal_Size                 new_bytes;

	new_bytes = new_size * sizeof(rtl_cache_bufctl_type*);
	new_table = (rtl_cache_bufctl_type**)rtl_arena_alloc(gp_cache_arena, &new_bytes);

	if (new_table != NULL)
	{
		rtl_cache_bufctl_type ** old_table;
		sal_Size                 old_size, i;

		memset (new_table, 0, new_bytes);

		RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

		old_table = cache->m_hash_table;
		old_size  = cache->m_hash_size;

		OSL_TRACE(
			"rtl_cache_hash_rescale(\"%s\"): "
			"nbuf: % " PRIu64 " (ave: %" PRIu64 "), frees: %" PRIu64 " "
			"[old_size: %lu, new_size: %lu]",
			cache->m_name,
			cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free,
			(cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free) >> cache->m_hash_shift,
			cache->m_slab_stats.m_free,
			old_size, new_size);

		cache->m_hash_table = new_table;
		cache->m_hash_size  = new_size;
		cache->m_hash_shift = highbit(cache->m_hash_size) - 1;

		for (i = 0; i < old_size; i++)
		{
			rtl_cache_bufctl_type * curr = old_table[i];
			while (curr != NULL)
			{
				rtl_cache_bufctl_type  * next = curr->m_next;
				rtl_cache_bufctl_type ** head;

				head = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, curr->m_addr)]);
				curr->m_next = (*head);
				(*head) = curr;

				curr = next;
			}
			old_table[i] = NULL;
		}

		RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));

		if (old_table != cache->m_hash_table_0)
		{
			sal_Size old_bytes = old_size * sizeof(rtl_cache_bufctl_type*);
			rtl_arena_free (gp_cache_arena, old_table, old_bytes);
		}
	}
}

/** rtl_cache_hash_insert()
 */
static RTL_MEMORY_INLINE sal_uIntPtr
rtl_cache_hash_insert (
	rtl_cache_type *        cache,
	rtl_cache_bufctl_type * bufctl
)
{
	rtl_cache_bufctl_type ** ppHead;

	ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, bufctl->m_addr)]);

	bufctl->m_next = (*ppHead);
	(*ppHead) = bufctl;

	return (bufctl->m_addr);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_hash_insert)
#endif /* __SUNPRO_C */

/** rtl_cache_hash_remove()
 */
static rtl_cache_bufctl_type *
rtl_cache_hash_remove (
	rtl_cache_type * cache,
	sal_uIntPtr      addr
)
{
	rtl_cache_bufctl_type ** ppHead;
	rtl_cache_bufctl_type  * bufctl;
	sal_Size                 lookups = 0;

	ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, addr)]);
	while ((bufctl = *ppHead) != NULL)
	{
		if (bufctl->m_addr == addr)
		{
			*ppHead = bufctl->m_next, bufctl->m_next = NULL;
			break;
		}

		lookups += 1;
		ppHead = &(bufctl->m_next);
	}

	OSL_ASSERT (bufctl != NULL); /* bad free */

	if (lookups > 1)
	{
		sal_Size nbuf = (sal_Size)(cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free);
		if (nbuf > 4 * cache->m_hash_size)
		{
			if (!(cache->m_features & RTL_CACHE_FEATURE_RESCALE))
			{
				sal_Size ave = nbuf >> cache->m_hash_shift;
				sal_Size new_size = cache->m_hash_size << (highbit(ave) - 1);

				cache->m_features |= RTL_CACHE_FEATURE_RESCALE;
				RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
				rtl_cache_hash_rescale (cache, new_size);
				RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
				cache->m_features &= ~RTL_CACHE_FEATURE_RESCALE;
			}
		}
	}

	return (bufctl);
}
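
/*
 * Illustrative note (not part of the original file): a removal that needs
 * more than one probe re-checks the load factor.  Once live buffers exceed
 * four per bucket on average, the table is grown by a power-of-2 factor
 * derived from the average chain depth; the RTL_CACHE_FEATURE_RESCALE flag
 * keeps the rescale from being re-entered while the slab lock is dropped.
 */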

/* ================================================================= */

/** RTL_CACHE_SLAB()
 */
#define RTL_CACHE_SLAB(addr, size) \
	(((rtl_cache_slab_type*)(RTL_MEMORY_P2END((sal_uIntPtr)(addr), (size)))) - 1)
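
/*
 * Illustrative note (not part of the original file): for caches without
 * RTL_CACHE_FEATURE_HASH the slab header lives at the end of the slab
 * itself, so RTL_CACHE_SLAB() recovers it from any buffer address by
 * rounding up to the slab boundary (RTL_MEMORY_P2END) and stepping back
 * one rtl_cache_slab_type.
 */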

/** rtl_cache_slab_constructor()
 */
static int
rtl_cache_slab_constructor (void * obj, void * arg)
{
	rtl_cache_slab_type * slab = (rtl_cache_slab_type*)(obj);

	(void) arg; /* unused */

	QUEUE_START_NAMED(slab, slab_);
	slab->m_ntypes = 0;

	return (1);
}

/** rtl_cache_slab_destructor()
 */
static void
rtl_cache_slab_destructor (void * obj, void * arg)
{
#if OSL_DEBUG_LEVEL == 0
	(void) obj; /* unused */
#else /* OSL_DEBUG_LEVEL */
	rtl_cache_slab_type * slab = (rtl_cache_slab_type*)(obj);

	/* assure removed from queue(s) */
	OSL_ASSERT(QUEUE_STARTED_NAMED(slab, slab_));

	/* assure no longer referenced */
	OSL_ASSERT(slab->m_ntypes == 0);
#endif /* OSL_DEBUG_LEVEL */

	(void) arg; /* unused */
}

/** rtl_cache_slab_create()
 *
 *  @precond cache->m_slab_lock released.
 */
static rtl_cache_slab_type *
rtl_cache_slab_create (
	rtl_cache_type * cache
)
{
	rtl_cache_slab_type * slab = NULL;
	void *                addr;
	sal_Size              size;

	size = cache->m_slab_size;
	addr = rtl_arena_alloc (cache->m_source, &size);
	if (addr != NULL)
	{
		OSL_ASSERT(size >= cache->m_slab_size);

		if (cache->m_features & RTL_CACHE_FEATURE_HASH)
		{
			/* allocate slab struct from slab cache */
			OSL_ASSERT (cache != gp_cache_slab_cache);
			slab = (rtl_cache_slab_type*)rtl_cache_alloc (gp_cache_slab_cache);
		}
		else
		{
			/* construct embedded slab struct */
			slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
			(void) rtl_cache_slab_constructor (slab, 0);
		}
		if (slab != NULL)
		{
			slab->m_data = (sal_uIntPtr)(addr);

			/* dynamic freelist initialization */
			slab->m_bp = slab->m_data;
			slab->m_sp = NULL;
		}
		else
		{
			rtl_arena_free (cache->m_source, addr, size);
		}
	}
	return (slab);
}

/** rtl_cache_slab_destroy()
 *
 *  @precond cache->m_slab_lock released.
 */
static void
rtl_cache_slab_destroy (
	rtl_cache_type *      cache,
	rtl_cache_slab_type * slab
)
{
	void *   addr   = (void*)(slab->m_data);
	sal_Size refcnt = slab->m_ntypes; slab->m_ntypes = 0;

	if (cache->m_features & RTL_CACHE_FEATURE_HASH)
	{
		/* cleanup bufctl(s) for free buffer(s) */
		sal_Size ntypes = (slab->m_bp - slab->m_data) / cache->m_type_size;
		for (ntypes -= refcnt; slab->m_sp != NULL; ntypes--)
		{
			rtl_cache_bufctl_type * bufctl = slab->m_sp;

			/* pop from freelist */
			slab->m_sp = bufctl->m_next, bufctl->m_next = NULL;

			/* return bufctl struct to bufctl cache */
			rtl_cache_free (gp_cache_bufctl_cache, bufctl);
		}
		OSL_ASSERT(ntypes == 0);

		/* return slab struct to slab cache */
		rtl_cache_free (gp_cache_slab_cache, slab);
	}
	else
	{
		/* destruct embedded slab struct */
		rtl_cache_slab_destructor (slab, 0);
	}

	if ((refcnt == 0) || (cache->m_features & RTL_CACHE_FEATURE_BULKDESTROY))
	{
		/* free memory */
		rtl_arena_free (cache->m_source, addr, cache->m_slab_size);
	}
}

/** rtl_cache_slab_populate()
 *
 *  @precond cache->m_slab_lock acquired.
 */
static int
rtl_cache_slab_populate (
	rtl_cache_type * cache
)
{
	rtl_cache_slab_type * slab;

	RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
	slab = rtl_cache_slab_create (cache);
	RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
	if (slab != NULL)
	{
		/* update buffer start addr w/ current color */
		slab->m_bp += cache->m_ncolor;

		/* update color for next slab */
		cache->m_ncolor += cache->m_type_align;
		if (cache->m_ncolor > cache->m_ncolor_max)
			cache->m_ncolor = 0;

		/* update stats */
		cache->m_slab_stats.m_mem_total += cache->m_slab_size;

		/* insert onto 'free' queue */
		QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
	}
	return (slab != NULL);
}
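
/*
 * Illustrative note (not part of the original file): the color offset
 * implements slab coloring as in Bonwick's slab allocator.  Each new slab
 * starts its first buffer m_ncolor bytes in, with m_ncolor advancing by the
 * object alignment and wrapping past m_ncolor_max, so that buffers from
 * different slabs map to different hardware cache lines.
 */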

/* ================================================================= */

/** rtl_cache_slab_alloc()
 *
 *  Allocate a buffer from slab layer; used by magazine layer.
 */
static void *
rtl_cache_slab_alloc (
	rtl_cache_type * cache
)
{
	void                * addr = NULL;
	rtl_cache_slab_type * head;

	RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

	head = &(cache->m_free_head);
	if ((head->m_slab_next != head) || rtl_cache_slab_populate (cache))
	{
		rtl_cache_slab_type   * slab;
		rtl_cache_bufctl_type * bufctl;

		slab = head->m_slab_next;
		OSL_ASSERT(slab->m_ntypes < cache->m_ntypes);

		if (slab->m_sp == NULL)
		{
			/* initialize bufctl w/ current 'slab->m_bp' */
			OSL_ASSERT (slab->m_bp < slab->m_data + cache->m_ntypes * cache->m_type_size + cache->m_ncolor_max);
			if (cache->m_features & RTL_CACHE_FEATURE_HASH)
			{
				/* allocate bufctl */
				OSL_ASSERT (cache != gp_cache_bufctl_cache);
				bufctl = (rtl_cache_bufctl_type*)rtl_cache_alloc (gp_cache_bufctl_cache);
				if (bufctl == NULL)
				{
					/* out of memory */
					RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
					return (0);
				}

				bufctl->m_addr = slab->m_bp;
				bufctl->m_slab = (sal_uIntPtr)(slab);
			}
			else
			{
				/* embedded bufctl */
				bufctl = (rtl_cache_bufctl_type*)(slab->m_bp);
			}
			bufctl->m_next = NULL;

			/* update 'slab->m_bp' to next free buffer */
			slab->m_bp += cache->m_type_size;

			/* assign bufctl to freelist */
			slab->m_sp = bufctl;
		}

		/* pop front */
		bufctl = slab->m_sp;
		slab->m_sp = bufctl->m_next;

		/* increment usage, check for full slab */
		if ((slab->m_ntypes += 1) == cache->m_ntypes)
		{
			/* remove from 'free' queue */
			QUEUE_REMOVE_NAMED(slab, slab_);

			/* insert onto 'used' queue (tail) */
			QUEUE_INSERT_TAIL_NAMED(&(cache->m_used_head), slab, slab_);
		}

		/* update stats */
		cache->m_slab_stats.m_alloc     += 1;
		cache->m_slab_stats.m_mem_alloc += cache->m_type_size;

		if (cache->m_features & RTL_CACHE_FEATURE_HASH)
			addr = (void*)rtl_cache_hash_insert (cache, bufctl);
		else
			addr = bufctl;

		/* DEBUG ONLY: mark allocated, undefined */
		OSL_DEBUG_ONLY(memset(addr, 0x77777777, cache->m_type_size));
		VALGRIND_MEMPOOL_ALLOC(cache, addr, cache->m_type_size);
	}

	RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
	return (addr);
}

/** rtl_cache_slab_free()
 *
 *  Return a buffer to slab layer; used by magazine layer.
 */
static void
rtl_cache_slab_free (
	rtl_cache_type * cache,
	void *           addr
)
{
	rtl_cache_bufctl_type * bufctl;
	rtl_cache_slab_type   * slab;

	RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));

	/* DEBUG ONLY: mark unallocated, undefined */
	VALGRIND_MEMPOOL_FREE(cache, addr);
	/* OSL_DEBUG_ONLY() */ VALGRIND_MAKE_MEM_UNDEFINED(addr, cache->m_type_size);
	OSL_DEBUG_ONLY(memset(addr, 0x33333333, cache->m_type_size));

	/* determine slab from addr */
	if (cache->m_features & RTL_CACHE_FEATURE_HASH)
	{
		bufctl = rtl_cache_hash_remove (cache, (sal_uIntPtr)(addr));
		slab = (bufctl != NULL) ? (rtl_cache_slab_type*)(bufctl->m_slab) : 0;
	}
	else
	{
		/* embedded slab struct */
		bufctl = (rtl_cache_bufctl_type*)(addr);
		slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
	}

	if (slab != NULL)
	{
		/* check for full slab */
		if (slab->m_ntypes == cache->m_ntypes)
		{
			/* remove from 'used' queue */
			QUEUE_REMOVE_NAMED(slab, slab_);

			/* insert onto 'free' queue (head) */
			QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
		}

		/* push front */
		bufctl->m_next = slab->m_sp;
		slab->m_sp = bufctl;

		/* update stats */
		cache->m_slab_stats.m_free      += 1;
		cache->m_slab_stats.m_mem_alloc -= cache->m_type_size;

		/* decrement usage, check for empty slab */
		if ((slab->m_ntypes -= 1) == 0)
		{
			/* remove from 'free' queue */
			QUEUE_REMOVE_NAMED(slab, slab_);

			/* update stats */
			cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

			/* free 'empty' slab */
			RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
			rtl_cache_slab_destroy (cache, slab);
			return;
		}
	}

	RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
}

/* ================================================================= */

/** rtl_cache_magazine_constructor()
 */
static int
rtl_cache_magazine_constructor (void * obj, void * arg)
{
	rtl_cache_magazine_type * mag = (rtl_cache_magazine_type*)(obj);
	/* @@@ sal_Size size = (sal_Size)(arg); @@@ */

	(void) arg; /* unused */

	mag->m_mag_next = NULL;
	mag->m_mag_size = RTL_CACHE_MAGAZINE_SIZE;
	mag->m_mag_used = 0;

	return (1);
}

/** rtl_cache_magazine_destructor()
 */
static void
rtl_cache_magazine_destructor (void * obj, void * arg)
{
#if OSL_DEBUG_LEVEL == 0
	(void) obj; /* unused */
#else /* OSL_DEBUG_LEVEL */
	rtl_cache_magazine_type * mag = (rtl_cache_magazine_type*)(obj);

	/* assure removed from queue(s) */
	OSL_ASSERT(mag->m_mag_next == NULL);

	/* assure no longer referenced */
	OSL_ASSERT(mag->m_mag_used == 0);
#endif /* OSL_DEBUG_LEVEL */

	(void) arg; /* unused */
}

/** rtl_cache_magazine_clear()
 */
static void
rtl_cache_magazine_clear (
	rtl_cache_type *          cache,
	rtl_cache_magazine_type * mag
)
{
	for (; mag->m_mag_used > 0; --mag->m_mag_used)
	{
		void * obj = mag->m_objects[mag->m_mag_used - 1];
		mag->m_objects[mag->m_mag_used - 1] = NULL;

		/* DEBUG ONLY: mark cached object allocated, undefined */
		VALGRIND_MEMPOOL_ALLOC(cache, obj, cache->m_type_size);
		if (cache->m_destructor != 0)
		{
			/* DEBUG ONLY: keep constructed object defined */
			VALGRIND_MAKE_MEM_DEFINED(obj, cache->m_type_size);

			/* destruct object */
			(cache->m_destructor)(obj, cache->m_userarg);
		}

		/* return buffer to slab layer */
		rtl_cache_slab_free (cache, obj);
	}
}
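
/*
 * Illustrative note (not part of the original file): the magazine layer
 * follows the design of Bonwick and Adams' "Magazines and Vmem" paper.
 * Each cache holds two magazines of constructed objects (m_cpu_curr and
 * m_cpu_prev) plus a depot of full and empty magazines; allocation and
 * free normally touch only these, and the slab layer is entered just when
 * the depot can supply no full (alloc) or empty (free) magazine.
 */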

/* ================================================================= */

/** rtl_cache_depot_enqueue()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static RTL_MEMORY_INLINE void
rtl_cache_depot_enqueue (
	rtl_cache_depot_type *    depot,
	rtl_cache_magazine_type * mag
)
{
	/* enqueue empty magazine */
	mag->m_mag_next = depot->m_mag_next;
	depot->m_mag_next = mag;

	/* update depot stats */
	depot->m_mag_count++;
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_enqueue)
#endif /* __SUNPRO_C */

/** rtl_cache_depot_dequeue()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static RTL_MEMORY_INLINE rtl_cache_magazine_type *
rtl_cache_depot_dequeue (
	rtl_cache_depot_type * depot
)
{
	rtl_cache_magazine_type * mag = NULL;
	if (depot->m_mag_count > 0)
	{
		/* dequeue magazine */
		OSL_ASSERT(depot->m_mag_next != NULL);

		mag = depot->m_mag_next;
		depot->m_mag_next = mag->m_mag_next;
		mag->m_mag_next = NULL;

		/* update depot stats */
		depot->m_mag_count--;
		depot->m_curr_min = SAL_MIN(depot->m_curr_min, depot->m_mag_count);
	}
	return (mag);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_dequeue)
#endif /* __SUNPRO_C */

/** rtl_cache_depot_exchange_alloc()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static RTL_MEMORY_INLINE rtl_cache_magazine_type *
rtl_cache_depot_exchange_alloc (
	rtl_cache_type *          cache,
	rtl_cache_magazine_type * empty
)
{
	rtl_cache_magazine_type * full;

	OSL_ASSERT((empty == NULL) || (empty->m_mag_used == 0));

	/* dequeue full magazine */
	full = rtl_cache_depot_dequeue (&(cache->m_depot_full));
	if ((full != NULL) && (empty != NULL))
	{
		/* enqueue empty magazine */
		rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
	}

	OSL_ASSERT((full == NULL) || (full->m_mag_used > 0));

	return (full);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_exchange_alloc)
#endif /* __SUNPRO_C */

/** rtl_cache_depot_exchange_free()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static RTL_MEMORY_INLINE rtl_cache_magazine_type *
rtl_cache_depot_exchange_free (
	rtl_cache_type *          cache,
	rtl_cache_magazine_type * full
)
{
	rtl_cache_magazine_type * empty;

	OSL_ASSERT((full == NULL) || (full->m_mag_used > 0));

	/* dequeue empty magazine */
	empty = rtl_cache_depot_dequeue (&(cache->m_depot_empty));
	if ((empty != NULL) && (full != NULL))
	{
		/* enqueue full magazine */
		rtl_cache_depot_enqueue (&(cache->m_depot_full), full);
	}

	OSL_ASSERT((empty == NULL) || (empty->m_mag_used == 0));

	return (empty);
}

#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma inline(rtl_cache_depot_exchange_free)
#endif /* __SUNPRO_C */

/** rtl_cache_depot_populate()
 *
 *  @precond cache->m_depot_lock acquired.
 */
static int
rtl_cache_depot_populate (
	rtl_cache_type * cache
)
{
	rtl_cache_magazine_type * empty = NULL;

	if (cache->m_magazine_cache != 0)
	{
		/* allocate new empty magazine */
		RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
		empty = (rtl_cache_magazine_type*)rtl_cache_alloc (cache->m_magazine_cache);
		RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
		if (empty != NULL)
		{
			/* enqueue (new) empty magazine */
			rtl_cache_depot_enqueue (&(cache->m_depot_empty), empty);
		}
	}
	return (empty != NULL);
}

/* ================================================================= */

/** rtl_cache_constructor()
 */
static int
rtl_cache_constructor (void * obj)
{
	rtl_cache_type * cache = (rtl_cache_type*)(obj);

	memset (cache, 0, sizeof(rtl_cache_type));

	/* linkage */
	QUEUE_START_NAMED(cache, cache_);

	/* slab layer */
	(void)RTL_MEMORY_LOCK_INIT(&(cache->m_slab_lock));

	QUEUE_START_NAMED(&(cache->m_free_head), slab_);
	QUEUE_START_NAMED(&(cache->m_used_head), slab_);

	cache->m_hash_table = cache->m_hash_table_0;
	cache->m_hash_size  = RTL_CACHE_HASH_SIZE;
	cache->m_hash_shift = highbit(cache->m_hash_size) - 1;

	/* depot layer */
	(void)RTL_MEMORY_LOCK_INIT(&(cache->m_depot_lock));

	return (1);
}

/** rtl_cache_destructor()
 */
static void
rtl_cache_destructor (void * obj)
{
	rtl_cache_type * cache = (rtl_cache_type*)(obj);

	/* linkage */
	OSL_ASSERT(QUEUE_STARTED_NAMED(cache, cache_));

	/* slab layer */
	(void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_slab_lock));

	OSL_ASSERT(QUEUE_STARTED_NAMED(&(cache->m_free_head), slab_));
	OSL_ASSERT(QUEUE_STARTED_NAMED(&(cache->m_used_head), slab_));

	OSL_ASSERT(cache->m_hash_table == cache->m_hash_table_0);
	OSL_ASSERT(cache->m_hash_size  == RTL_CACHE_HASH_SIZE);
	OSL_ASSERT(cache->m_hash_shift == (sal_Size)(highbit(cache->m_hash_size) - 1));

	/* depot layer */
	(void)RTL_MEMORY_LOCK_DESTROY(&(cache->m_depot_lock));
}

/* ================================================================= */

/** rtl_cache_activate()
 */
static rtl_cache_type *
rtl_cache_activate (
	rtl_cache_type * cache,
	const char *     name,
	size_t           objsize,
	size_t           objalign,
	int  (SAL_CALL * constructor)(void * obj, void * userarg),
	void (SAL_CALL * destructor) (void * obj, void * userarg),
	void (SAL_CALL * reclaim)    (void * userarg),
	void *           userarg,
	rtl_arena_type * source,
	int              flags
)
{
	OSL_ASSERT(cache != NULL);
	if (cache != NULL)
	{
		sal_Size slabsize;

		snprintf (cache->m_name, sizeof(cache->m_name), "%s", name);

		/* ensure minimum size (embedded bufctl linkage) */
		objsize = SAL_MAX(objsize, sizeof(rtl_cache_bufctl_type*));

		if (objalign == 0)
		{
			/* determine default alignment */
#ifdef NEED_ALIGN16
			if (objsize >= RTL_MEMORY_ALIGNMENT_16)
				objalign = RTL_MEMORY_ALIGNMENT_16;
			else if (objsize >= RTL_MEMORY_ALIGNMENT_8)
#else
			if (objsize >= RTL_MEMORY_ALIGNMENT_8)
#endif
				objalign = RTL_MEMORY_ALIGNMENT_8;
			else
				objalign = RTL_MEMORY_ALIGNMENT_4;
		}
		else
		{
			/* ensure minimum alignment */
			objalign = SAL_MAX(objalign, RTL_MEMORY_ALIGNMENT_4);
		}
		OSL_ASSERT(RTL_MEMORY_ISP2(objalign));

		cache->m_type_size  = objsize = RTL_MEMORY_P2ROUNDUP(objsize, objalign);
		cache->m_type_align = objalign;
		cache->m_type_shift = highbit(cache->m_type_size) - 1;

		cache->m_constructor = constructor;
		cache->m_destructor  = destructor;
		cache->m_reclaim     = reclaim;
		cache->m_userarg     = userarg;

		/* slab layer */
		cache->m_source = source;

		slabsize = source->m_quantum; /* minimum slab size */
		if (flags & RTL_CACHE_FLAG_QUANTUMCACHE)
		{
			/* next power of 2 above 3 * qcache_max */
			slabsize = SAL_MAX(slabsize, (1UL << highbit(3 * source->m_qcache_max)));
		}
		else
		{
			/* waste at most 1/8 of slab */
			slabsize = SAL_MAX(slabsize, cache->m_type_size * 8);
		}

		slabsize = RTL_MEMORY_P2ROUNDUP(slabsize, source->m_quantum);
		if (!RTL_MEMORY_ISP2(slabsize))
			slabsize = 1UL << highbit(slabsize);
		cache->m_slab_size = slabsize;

		if (cache->m_slab_size > source->m_quantum)
		{
			OSL_ASSERT(gp_cache_slab_cache != NULL);
			OSL_ASSERT(gp_cache_bufctl_cache != NULL);

			cache->m_features  |= RTL_CACHE_FEATURE_HASH;
			cache->m_ntypes     = cache->m_slab_size / cache->m_type_size;
			cache->m_ncolor_max = cache->m_slab_size % cache->m_type_size;
		}
		else
		{
			/* embedded slab struct */
			cache->m_ntypes     = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) / cache->m_type_size;
			cache->m_ncolor_max = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) % cache->m_type_size;
		}

		OSL_ASSERT(cache->m_ntypes > 0);
		cache->m_ncolor = 0;

		if (flags & RTL_CACHE_FLAG_BULKDESTROY)
		{
			/* allow bulk slab delete upon cache deactivation */
			cache->m_features |= RTL_CACHE_FEATURE_BULKDESTROY;
		}

		/* magazine layer */
		if (!(flags & RTL_CACHE_FLAG_NOMAGAZINE))
		{
			OSL_ASSERT(gp_cache_magazine_cache != NULL);
			cache->m_magazine_cache = gp_cache_magazine_cache;
		}

		/* insert into cache list */
		RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
		QUEUE_INSERT_TAIL_NAMED(&(g_cache_list.m_cache_head), cache, cache_);
		RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
	}
	return (cache);
}
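
/*
 * Worked example (illustrative, not part of the original file): for
 * 64-byte objects on a source arena with a 4096-byte quantum, the
 * "waste at most 1/8 of slab" rule requests 64 * 8 = 512 bytes, so the
 * slab stays at the 4096-byte quantum.  Since slab_size > quantum is then
 * false, the slab header is embedded and m_ntypes becomes
 * (4096 - sizeof(rtl_cache_slab_type)) / 64.
 */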

/** rtl_cache_deactivate()
 */
static void
rtl_cache_deactivate (
	rtl_cache_type * cache
)
{
	int active = 1;

	/* remove from cache list */
	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	active = QUEUE_STARTED_NAMED(cache, cache_) == 0;
	QUEUE_REMOVE_NAMED(cache, cache_);
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

	OSL_PRECOND(active, "rtl_cache_deactivate(): orphaned cache.");

	/* cleanup magazine layer */
	if (cache->m_magazine_cache != 0)
	{
		rtl_cache_type *          mag_cache;
		rtl_cache_magazine_type * mag;

		/* prevent recursion */
		mag_cache = cache->m_magazine_cache, cache->m_magazine_cache = 0;

		/* cleanup cpu layer */
		if ((mag = cache->m_cpu_curr) != NULL)
		{
			cache->m_cpu_curr = 0;
			rtl_cache_magazine_clear (cache, mag);
			rtl_cache_free (mag_cache, mag);
		}
		if ((mag = cache->m_cpu_prev) != NULL)
		{
			cache->m_cpu_prev = 0;
			rtl_cache_magazine_clear (cache, mag);
			rtl_cache_free (mag_cache, mag);
		}

		/* cleanup depot layer */
		while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_full))) != NULL)
		{
			rtl_cache_magazine_clear (cache, mag);
			rtl_cache_free (mag_cache, mag);
		}
		while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_empty))) != NULL)
		{
			rtl_cache_magazine_clear (cache, mag);
			rtl_cache_free (mag_cache, mag);
		}
	}

	OSL_TRACE(
		"rtl_cache_deactivate(\"%s\"): "
		"[slab]: allocs: %"PRIu64", frees: %"PRIu64"; total: %lu, used: %lu; "
		"[cpu]: allocs: %"PRIu64", frees: %"PRIu64"; "
		"[total]: allocs: %"PRIu64", frees: %"PRIu64"",
		cache->m_name,
		cache->m_slab_stats.m_alloc, cache->m_slab_stats.m_free,
		cache->m_slab_stats.m_mem_total, cache->m_slab_stats.m_mem_alloc,
		cache->m_cpu_stats.m_alloc, cache->m_cpu_stats.m_free,
		cache->m_slab_stats.m_alloc + cache->m_cpu_stats.m_alloc,
		cache->m_slab_stats.m_free  + cache->m_cpu_stats.m_free
	);

	/* cleanup slab layer */
	if (cache->m_slab_stats.m_alloc > cache->m_slab_stats.m_free)
	{
		OSL_TRACE(
			"rtl_cache_deactivate(\"%s\"): "
			"cleaning up %"PRIu64" leaked buffer(s) [%lu bytes] [%lu total]",
			cache->m_name,
			cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free,
			cache->m_slab_stats.m_mem_alloc, cache->m_slab_stats.m_mem_total
		);

		if (cache->m_features & RTL_CACHE_FEATURE_HASH)
		{
			/* cleanup bufctl(s) for leaking buffer(s) */
			sal_Size i, n = cache->m_hash_size;
			for (i = 0; i < n; i++)
			{
				rtl_cache_bufctl_type * bufctl;
				while ((bufctl = cache->m_hash_table[i]) != NULL)
				{
					/* pop from hash table */
					cache->m_hash_table[i] = bufctl->m_next, bufctl->m_next = NULL;

					/* return to bufctl cache */
					rtl_cache_free (gp_cache_bufctl_cache, bufctl);
				}
			}
		}
		{
			/* force cleanup of remaining slabs */
			rtl_cache_slab_type *head, *slab;

			head = &(cache->m_used_head);
			for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
			{
				/* remove from 'used' queue */
				QUEUE_REMOVE_NAMED(slab, slab_);

				/* update stats */
				cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

				/* free slab */
				rtl_cache_slab_destroy (cache, slab);
			}

			head = &(cache->m_free_head);
			for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
			{
				/* remove from 'free' queue */
				QUEUE_REMOVE_NAMED(slab, slab_);

				/* update stats */
				cache->m_slab_stats.m_mem_total -= cache->m_slab_size;

				/* free slab */
				rtl_cache_slab_destroy (cache, slab);
			}
		}
	}

	if (cache->m_hash_table != cache->m_hash_table_0)
	{
		rtl_arena_free (
			gp_cache_arena,
			cache->m_hash_table,
			cache->m_hash_size * sizeof(rtl_cache_bufctl_type*));

		cache->m_hash_table = cache->m_hash_table_0;
		cache->m_hash_size  = RTL_CACHE_HASH_SIZE;
		cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
	}
}

/* ================================================================= *
 *
 * cache implementation.
 *
 * ================================================================= */

/** rtl_cache_create()
 */
rtl_cache_type *
SAL_CALL rtl_cache_create (
	const char *     name,
	sal_Size         objsize,
	sal_Size         objalign,
	int  (SAL_CALL * constructor)(void * obj, void * userarg),
	void (SAL_CALL * destructor) (void * obj, void * userarg),
	void (SAL_CALL * reclaim)    (void * userarg),
	void *           userarg,
	rtl_arena_type * source,
	int              flags
) SAL_THROW_EXTERN_C()
{
	rtl_cache_type * result = 0;
	sal_Size         size   = sizeof(rtl_cache_type);

try_alloc:
	result = (rtl_cache_type*)rtl_arena_alloc (gp_cache_arena, &size);
	if (result != 0)
	{
		rtl_cache_type * cache = result;
		VALGRIND_CREATE_MEMPOOL(cache, 0, 0);
		(void) rtl_cache_constructor (cache);

		if (!source)
		{
			/* use default arena */
			OSL_ASSERT(gp_default_arena != 0);
			source = gp_default_arena;
		}

		result = rtl_cache_activate (
			cache,
			name,
			objsize,
			objalign,
			constructor,
			destructor,
			reclaim,
			userarg,
			source,
			flags
		);

		if (result == 0)
		{
			/* activation failed */
			rtl_cache_deactivate (cache);
			rtl_cache_destructor (cache);
			VALGRIND_DESTROY_MEMPOOL(cache);
			rtl_arena_free (gp_cache_arena, cache, size);
		}
	}
	else if (gp_cache_arena == 0)
	{
		if (rtl_cache_init())
		{
			/* try again */
			goto try_alloc;
		}
	}
	return (result);
}
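
/*
 * Usage sketch (illustrative only; 'point_type' and 'point_cache' are
 * hypothetical names, not part of this file):
 *
 *   typedef struct { double x, y; } point_type;
 *
 *   rtl_cache_type * point_cache = rtl_cache_create (
 *       "point_cache",           -- name
 *       sizeof(point_type), 0,   -- objsize, default alignment
 *       0, 0, 0, 0,              -- constructor, destructor, reclaim, userarg
 *       0,                       -- source: default arena
 *       0);                      -- flags
 *
 *   point_type * p = (point_type*)rtl_cache_alloc (point_cache);
 *   ...
 *   rtl_cache_free (point_cache, p);
 *   rtl_cache_destroy (point_cache);
 */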

/** rtl_cache_destroy()
 */
void SAL_CALL rtl_cache_destroy (
	rtl_cache_type * cache
) SAL_THROW_EXTERN_C()
{
	if (cache != 0)
	{
		rtl_cache_deactivate (cache);
		rtl_cache_destructor (cache);
		VALGRIND_DESTROY_MEMPOOL(cache);
		rtl_arena_free (gp_cache_arena, cache, sizeof(rtl_cache_type));
	}
}

/** rtl_cache_alloc()
 */
void *
SAL_CALL rtl_cache_alloc (
	rtl_cache_type * cache
) SAL_THROW_EXTERN_C()
{
	void * obj = 0;

	if (cache == 0)
		return (0);

	if (cache->m_cpu_curr != 0)
	{
		RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

		for (;;)
		{
			/* take object from magazine layer */
			rtl_cache_magazine_type *curr, *prev, *temp;

			curr = cache->m_cpu_curr;
			if ((curr != 0) && (curr->m_mag_used > 0))
			{
				obj = curr->m_objects[--curr->m_mag_used];
#if defined(HAVE_VALGRIND_MEMCHECK_H)
				VALGRIND_MEMPOOL_ALLOC(cache, obj, cache->m_type_size);
				if (cache->m_constructor != 0)
				{
					/* keep constructed object defined */
					VALGRIND_MAKE_MEM_DEFINED(obj, cache->m_type_size);
				}
#endif /* HAVE_VALGRIND_MEMCHECK_H */
				cache->m_cpu_stats.m_alloc += 1;
				RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

				return (obj);
			}

			prev = cache->m_cpu_prev;
			if ((prev != 0) && (prev->m_mag_used > 0))
			{
				temp = cache->m_cpu_curr;
				cache->m_cpu_curr = cache->m_cpu_prev;
				cache->m_cpu_prev = temp;

				continue;
			}

			temp = rtl_cache_depot_exchange_alloc (cache, prev);
			if (temp != 0)
			{
				cache->m_cpu_prev = cache->m_cpu_curr;
				cache->m_cpu_curr = temp;

				continue;
			}

			/* no full magazine: fall through to slab layer */
			break;
		}

		RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
	}

	/* alloc buffer from slab layer */
	obj = rtl_cache_slab_alloc (cache);
	if ((obj != 0) && (cache->m_constructor != 0))
	{
		/* construct object */
		if (!((cache->m_constructor)(obj, cache->m_userarg)))
		{
			/* construction failure */
			rtl_cache_slab_free (cache, obj), obj = 0;
		}
	}
	return (obj);
}

/** rtl_cache_free()
 */
void
SAL_CALL rtl_cache_free (
	rtl_cache_type * cache,
	void *           obj
) SAL_THROW_EXTERN_C()
{
	if ((obj != 0) && (cache != 0))
	{
		RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

		for (;;)
		{
			/* return object to magazine layer */
			rtl_cache_magazine_type *curr, *prev, *temp;

			curr = cache->m_cpu_curr;
			if ((curr != 0) && (curr->m_mag_used < curr->m_mag_size))
			{
				curr->m_objects[curr->m_mag_used++] = obj;
#if defined(HAVE_VALGRIND_MEMCHECK_H)
				VALGRIND_MEMPOOL_FREE(cache, obj);
#endif /* HAVE_VALGRIND_MEMCHECK_H */
				cache->m_cpu_stats.m_free += 1;
				RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

				return;
			}

			prev = cache->m_cpu_prev;
			if ((prev != 0) && (prev->m_mag_used == 0))
			{
				temp = cache->m_cpu_curr;
				cache->m_cpu_curr = cache->m_cpu_prev;
				cache->m_cpu_prev = temp;

				continue;
			}

			temp = rtl_cache_depot_exchange_free (cache, prev);
			if (temp != 0)
			{
				cache->m_cpu_prev = cache->m_cpu_curr;
				cache->m_cpu_curr = temp;

				continue;
			}

			if (rtl_cache_depot_populate(cache) != 0)
			{
				continue;
			}

			/* no empty magazine: fall through to slab layer */
			break;
		}

		RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));

		/* no space for constructed object in magazine layer */
		if (cache->m_destructor != 0)
		{
			/* destruct object */
			(cache->m_destructor)(obj, cache->m_userarg);
		}

		/* return buffer to slab layer */
		rtl_cache_slab_free (cache, obj);
	}
}

/* ================================================================= *
 *
 * cache wsupdate (machdep) internals.
 *
 * ================================================================= */

/** rtl_cache_wsupdate_init()
 *
 *  @precond g_cache_list.m_lock initialized
 */
static void
rtl_cache_wsupdate_init (void);

/** rtl_cache_wsupdate_wait()
 *
 *  @precond g_cache_list.m_lock acquired
 */
static void
rtl_cache_wsupdate_wait (
	unsigned int seconds
);

/** rtl_cache_wsupdate_fini()
 *
 */
static void
rtl_cache_wsupdate_fini (void);

/* ================================================================= */

#if defined(SAL_UNX)

#include <sys/time.h>

static void *
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init (void)
{
	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	g_cache_list.m_update_done = 0;
	(void) pthread_cond_init (&(g_cache_list.m_update_cond), NULL);
	if (pthread_create (
			&(g_cache_list.m_update_thread), NULL, rtl_cache_wsupdate_all, (void*)(10)) != 0)
	{
		/* failure */
		g_cache_list.m_update_thread = (pthread_t)(0);
	}
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
	if (seconds > 0)
	{
		struct timeval  now;
		struct timespec wakeup;

		gettimeofday(&now, 0);
		wakeup.tv_sec  = now.tv_sec + (seconds);
		wakeup.tv_nsec = now.tv_usec * 1000;

		(void) pthread_cond_timedwait (
			&(g_cache_list.m_update_cond),
			&(g_cache_list.m_lock),
			&wakeup);
	}
}

static void
rtl_cache_wsupdate_fini (void)
{
	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	g_cache_list.m_update_done = 1;
	pthread_cond_signal (&(g_cache_list.m_update_cond));
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

	if (g_cache_list.m_update_thread != (pthread_t)(0))
		pthread_join (g_cache_list.m_update_thread, NULL);
}

/* ================================================================= */

#elif defined(SAL_OS2)

static void
rtl_cache_wsupdate_all (void * arg);

static void rtl_cache_fini (void);

static void
rtl_cache_wsupdate_init (void)
{
	ULONG ulThreadId;
	APIRET rc;

	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	g_cache_list.m_update_done = 0;

	/* We use atexit() because it lets the CRT exit processing run this
	 * handler before threads are killed.  With __attribute__((destructor))
	 * the function would only be called once DosExit starts DLL
	 * destruction, which happens after ALL threads have been killed. */
	atexit( rtl_cache_fini);

	//g_cache_list.m_update_cond = CreateEvent (0, TRUE, FALSE, 0);
	/* Warp3 FP29 or Warp4 FP4 or better required */
	rc = DosCreateEventSem( NULL, &g_cache_list.m_update_cond, 0x0800, 0);

	g_cache_list.m_update_thread = (ULONG) _beginthread( rtl_cache_wsupdate_all, NULL,
			        65*1024, (void*) 10);
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
	APIRET rc;
	if (seconds > 0)
	{
		RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
		rc = DosWaitEventSem(g_cache_list.m_update_cond, seconds*1000);
		RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	}
}

static void
rtl_cache_wsupdate_fini (void)
{
	APIRET rc;
	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	g_cache_list.m_update_done = 1;
	rc = DosPostEventSem(g_cache_list.m_update_cond);
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
	rc = DosWaitThread(&g_cache_list.m_update_thread, DCWW_WAIT);
}

/* ================================================================= */

#elif defined(SAL_W32)

static DWORD WINAPI
rtl_cache_wsupdate_all (void * arg);

static void
rtl_cache_wsupdate_init (void)
{
	DWORD dwThreadId;

	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	g_cache_list.m_update_done = 0;
	g_cache_list.m_update_cond = CreateEvent (0, TRUE, FALSE, 0);

	g_cache_list.m_update_thread =
		CreateThread (NULL, 0, rtl_cache_wsupdate_all, (LPVOID)(10), 0, &dwThreadId);
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
}

static void
rtl_cache_wsupdate_wait (unsigned int seconds)
{
	if (seconds > 0)
	{
		RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
		WaitForSingleObject (g_cache_list.m_update_cond, (DWORD)(seconds * 1000));
		RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	}
}

static void
rtl_cache_wsupdate_fini (void)
{
	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	g_cache_list.m_update_done = 1;
	SetEvent (g_cache_list.m_update_cond);
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

	WaitForSingleObject (g_cache_list.m_update_thread, INFINITE);
}

#endif /* SAL_UNX || SAL_OS2 || SAL_W32 */

/* ================================================================= */

/** rtl_cache_depot_wsupdate()
 *  update depot stats and purge excess magazines.
 *
 *  @precond cache->m_depot_lock acquired
 */
static void
rtl_cache_depot_wsupdate (
	rtl_cache_type *       cache,
	rtl_cache_depot_type * depot
)
{
	sal_Size npurge;

	depot->m_prev_min = depot->m_curr_min;
	depot->m_curr_min = depot->m_mag_count;

	npurge = SAL_MIN(depot->m_curr_min, depot->m_prev_min);
	for (; npurge > 0; npurge--)
	{
		rtl_cache_magazine_type * mag = rtl_cache_depot_dequeue (depot);
		if (mag != NULL)
		{
			RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
			rtl_cache_magazine_clear (cache, mag);
			rtl_cache_free (cache->m_magazine_cache, mag);
			RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
		}
	}
}

/** rtl_cache_wsupdate()
 *
 *  @precond cache->m_depot_lock released
 */
static void
rtl_cache_wsupdate (
	rtl_cache_type * cache
)
{
	if (cache->m_magazine_cache != 0)
	{
		RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));

		OSL_TRACE(
			"rtl_cache_wsupdate(\"%s\") "
			"[depot: count, curr_min, prev_min] "
			"full: %lu, %lu, %lu; empty: %lu, %lu, %lu",
			cache->m_name,
			cache->m_depot_full.m_mag_count,
			cache->m_depot_full.m_curr_min,
			cache->m_depot_full.m_prev_min,
			cache->m_depot_empty.m_mag_count,
			cache->m_depot_empty.m_curr_min,
			cache->m_depot_empty.m_prev_min
		);

		rtl_cache_depot_wsupdate (cache, &(cache->m_depot_full));
		rtl_cache_depot_wsupdate (cache, &(cache->m_depot_empty));

		RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
	}
}

/** rtl_cache_wsupdate_all()
 *
 */
#if defined(SAL_UNX)
static void *
#elif defined(SAL_OS2)
static void
#elif defined(SAL_W32)
static DWORD WINAPI
#endif /* SAL_UNX || SAL_OS2 || SAL_W32 */
rtl_cache_wsupdate_all (void * arg)
{
	unsigned int seconds = (unsigned int)SAL_INT_CAST(sal_uIntPtr, arg);

	RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
	while (!g_cache_list.m_update_done)
	{
		rtl_cache_wsupdate_wait (seconds);
		if (!g_cache_list.m_update_done)
		{
			rtl_cache_type * head, * cache;

			head = &(g_cache_list.m_cache_head);
			for (cache  = head->m_cache_next;
				 cache != head;
				 cache  = cache->m_cache_next)
			{
				rtl_cache_wsupdate (cache);
			}
		}
	}
	RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));

#if !defined(SAL_OS2)
	return (0);
#endif
}
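
/*
 * Illustrative note (not part of the original file): each update pass
 * purges min(m_curr_min, m_prev_min) magazines per depot, i.e. the number
 * of magazines the depot never dipped below during the last two intervals.
 * Magazines that were never needed to satisfy demand are cleared and their
 * objects returned to the slab layer, bounding the cached working set.
 */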

/* ================================================================= *
 *
 * cache initialization.
 *
 * ================================================================= */

static void
rtl_cache_once_init (void)
{
	{
		/* list of caches */
		RTL_MEMORY_LOCK_INIT(&(g_cache_list.m_lock));
		(void) rtl_cache_constructor (&(g_cache_list.m_cache_head));
	}
	{
		/* cache: internal arena */
		OSL_ASSERT(gp_cache_arena == NULL);

		gp_cache_arena = rtl_arena_create (
			"rtl_cache_internal_arena",
			64,   /* quantum */
			0,    /* no quantum caching */
			NULL, /* default source */
			rtl_arena_alloc,
			rtl_arena_free,
			0     /* flags */
		);
		OSL_ASSERT(gp_cache_arena != NULL);

		/* check 'gp_default_arena' initialization */
		OSL_ASSERT(gp_default_arena != NULL);
	}
	{
		/* cache: magazine cache */
		static rtl_cache_type g_cache_magazine_cache;

		OSL_ASSERT(gp_cache_magazine_cache == NULL);
		VALGRIND_CREATE_MEMPOOL(&g_cache_magazine_cache, 0, 0);
		(void) rtl_cache_constructor (&g_cache_magazine_cache);

		gp_cache_magazine_cache = rtl_cache_activate (
			&g_cache_magazine_cache,
			"rtl_cache_magazine_cache",
			sizeof(rtl_cache_magazine_type), /* objsize  */
			0,                               /* objalign */
			rtl_cache_magazine_constructor,
			rtl_cache_magazine_destructor,
			0, /* reclaim */
			0, /* userarg: NYI */
			gp_default_arena, /* source */
			RTL_CACHE_FLAG_NOMAGAZINE /* during bootstrap; activated below */
		);
		OSL_ASSERT(gp_cache_magazine_cache != NULL);

		/* activate magazine layer */
		g_cache_magazine_cache.m_magazine_cache = gp_cache_magazine_cache;
	}
	{
		/* cache: slab (struct) cache */
		static rtl_cache_type g_cache_slab_cache;

		OSL_ASSERT(gp_cache_slab_cache == NULL);
		VALGRIND_CREATE_MEMPOOL(&g_cache_slab_cache, 0, 0);
		(void) rtl_cache_constructor (&g_cache_slab_cache);

		gp_cache_slab_cache = rtl_cache_activate (
			&g_cache_slab_cache,
			"rtl_cache_slab_cache",
			sizeof(rtl_cache_slab_type), /* objsize  */
			0,                           /* objalign */
			rtl_cache_slab_constructor,
			rtl_cache_slab_destructor,
			0,                           /* reclaim */
			0,                           /* userarg: none */
			gp_default_arena,            /* source */
			0                            /* flags: none */
		);
		OSL_ASSERT(gp_cache_slab_cache != NULL);
	}
	{
		/* cache: bufctl cache */
		static rtl_cache_type g_cache_bufctl_cache;

		OSL_ASSERT(gp_cache_bufctl_cache == NULL);
		VALGRIND_CREATE_MEMPOOL(&g_cache_bufctl_cache, 0, 0);
		(void) rtl_cache_constructor (&g_cache_bufctl_cache);

		gp_cache_bufctl_cache = rtl_cache_activate (
			&g_cache_bufctl_cache,
			"rtl_cache_bufctl_cache",
			sizeof(rtl_cache_bufctl_type), /* objsize */
			0,                             /* objalign  */
			0,                /* constructor */
			0,                /* destructor */
			0,                /* reclaim */
			0,                /* userarg */
			gp_default_arena, /* source */
			0                 /* flags: none */
		);
		OSL_ASSERT(gp_cache_bufctl_cache != NULL);
	}

	rtl_cache_wsupdate_init();
}

static int
rtl_cache_init (void)
{
	static sal_once_type g_once = SAL_ONCE_INIT;
	SAL_ONCE(&g_once, rtl_cache_once_init);
	return (gp_cache_arena != NULL);
}

/* ================================================================= */

/*
  Issue http://udk.openoffice.org/issues/show_bug.cgi?id=92388

  Mac OS X does not seem to support "__cxa_atexit", thus leading
  to the situation that "__attribute__((destructor))" functions
  (in particular "rtl_{memory|cache|arena}_fini") become called
  _before_ global C++ object d'tors.

  Delegated the call to "rtl_cache_fini()" into a dummy C++ object,
  see alloc_fini.cxx .
*/
#if defined(__GNUC__) && !defined(MACOSX) && !defined(SAL_OS2)
static void rtl_cache_fini (void) __attribute__((destructor));
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#pragma fini(rtl_cache_fini)
static void rtl_cache_fini (void);
#endif /* __GNUC__ || __SUNPRO_C */

void
rtl_cache_fini (void)
{
	if (gp_cache_arena != NULL)
	{
		rtl_cache_type * cache, * head;

		rtl_cache_wsupdate_fini();

		if (gp_cache_bufctl_cache != NULL)
		{
			cache = gp_cache_bufctl_cache, gp_cache_bufctl_cache = NULL;
			rtl_cache_deactivate (cache);
			rtl_cache_destructor (cache);
			VALGRIND_DESTROY_MEMPOOL(cache);
		}
		if (gp_cache_slab_cache != NULL)
		{
			cache = gp_cache_slab_cache, gp_cache_slab_cache = NULL;
			rtl_cache_deactivate (cache);
			rtl_cache_destructor (cache);
			VALGRIND_DESTROY_MEMPOOL(cache);
		}
		if (gp_cache_magazine_cache != NULL)
		{
			cache = gp_cache_magazine_cache, gp_cache_magazine_cache = NULL;
			rtl_cache_deactivate (cache);
			rtl_cache_destructor (cache);
			VALGRIND_DESTROY_MEMPOOL(cache);
		}
		if (gp_cache_arena != NULL)
		{
			rtl_arena_destroy (gp_cache_arena);
			gp_cache_arena = NULL;
		}

		RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
		head = &(g_cache_list.m_cache_head);
		for (cache = head->m_cache_next; cache != head; cache = cache->m_cache_next)
		{
			OSL_TRACE(
				"rtl_cache_fini(\"%s\") "
				"[slab]: allocs: %"PRIu64", frees: %"PRIu64"; total: %lu, used: %lu; "
				"[cpu]: allocs: %"PRIu64", frees: %"PRIu64"; "
				"[total]: allocs: %"PRIu64", frees: %"PRIu64"",
				cache->m_name,
				cache->m_slab_stats.m_alloc, cache->m_slab_stats.m_free,
				cache->m_slab_stats.m_mem_total, cache->m_slab_stats.m_mem_alloc,
				cache->m_cpu_stats.m_alloc, cache->m_cpu_stats.m_free,
				cache->m_slab_stats.m_alloc + cache->m_cpu_stats.m_alloc,
				cache->m_slab_stats.m_free + cache->m_cpu_stats.m_free
			);
		}
		RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
	}
}

/* ================================================================= */