xref: /aoo41x/main/sal/rtl/source/alloc_arena.c (revision 38aa938a)
1 /**************************************************************
2  *
3  * Licensed to the Apache Software Foundation (ASF) under one
4  * or more contributor license agreements.  See the NOTICE file
5  * distributed with this work for additional information
6  * regarding copyright ownership.  The ASF licenses this file
7  * to you under the Apache License, Version 2.0 (the
8  * "License"); you may not use this file except in compliance
9  * with the License.  You may obtain a copy of the License at
10  *
11  *   http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing,
14  * software distributed under the License is distributed on an
15  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16  * KIND, either express or implied.  See the License for the
17  * specific language governing permissions and limitations
18  * under the License.
19  *
20  *************************************************************/
21 
22 
23 
24 #define _BSD_SOURCE /* sys/mman.h: MAP_ANON */
25 #include "alloc_arena.h"
26 
27 #include "alloc_impl.h"
28 #include "internal/once.h"
29 #include "sal/macros.h"
30 #include "osl/diagnose.h"
31 
32 #include <string.h>
33 #include <stdio.h>
34 
35 #ifdef OS2
36 #undef OSL_TRACE
37 #define OSL_TRACE                  1 ? ((void)0) : _OSL_GLOBAL osl_trace
38 #define INCL_DOS
39 #include <os2.h>
40 #endif
41 
42 /* ================================================================= *
43  *
44  * arena internals.
45  *
46  * ================================================================= */
47 
/** g_arena_list
 *  Global registry of all live arenas, protected by its own lock.
 *  @internal
 */
struct rtl_arena_list_st
{
	rtl_memory_lock_type m_lock;
	rtl_arena_type       m_arena_head;
};

static struct rtl_arena_list_st g_arena_list;


/** gp_arena_arena
 *  provided for arena_type allocations, and hash_table resizing.
 *
 *  @internal
 */
static rtl_arena_type * gp_arena_arena = 0;


/** gp_machdep_arena
 *
 *  Low level virtual memory (pseudo) arena
 *  (platform dependent implementation)
 *
 *  @internal
 */
static rtl_arena_type * gp_machdep_arena = 0;


/* allocate page-granular memory from the OS; *pSize is rounded up
 * and updated to the actually allocated size */
static void *
SAL_CALL rtl_machdep_alloc (
	rtl_arena_type * pArena,
	sal_Size *       pSize
);

/* return memory obtained via rtl_machdep_alloc() to the OS */
static void
SAL_CALL rtl_machdep_free (
	rtl_arena_type * pArena,
	void *           pAddr,
	sal_Size         nSize
);

/* system page size (platform dependent) */
static sal_Size
rtl_machdep_pagesize (void);


/** gp_default_arena
 *  Default source arena for arenas created without an explicit one.
 */
rtl_arena_type * gp_default_arena = 0;


/** rtl_arena_init()
 *  One-time module initialization (bootstraps the global arenas).
 *  @internal
 */
static int
rtl_arena_init (void);
105 
106 
107 /* ================================================================= */
108 
109 /** rtl_arena_segment_constructor()
110  */
111 static int
rtl_arena_segment_constructor(void * obj)112 rtl_arena_segment_constructor (void * obj)
113 {
114 	rtl_arena_segment_type * segment = (rtl_arena_segment_type*)(obj);
115 
116 	QUEUE_START_NAMED(segment, s);
117 	QUEUE_START_NAMED(segment, f);
118 
119 	return (1);
120 }
121 
122 
/** rtl_arena_segment_destructor()
 *  Debug-only sanity check: a destructed segment descriptor must be
 *  unlinked (self-linked) on both its 's' and 'f' queues.
 */
static void
rtl_arena_segment_destructor (void * obj)
{
#if OSL_DEBUG_LEVEL == 0
    (void) obj; /* unused */
#else /* OSL_DEBUG_LEVEL */
	rtl_arena_segment_type * segment = (rtl_arena_segment_type*)(obj);

	OSL_ASSERT(QUEUE_STARTED_NAMED(segment, s));
	OSL_ASSERT(QUEUE_STARTED_NAMED(segment, f));
#endif /* OSL_DEBUG_LEVEL */
}
137 
138 /* ================================================================= */
139 
/** rtl_arena_segment_populate()
 *
 *  Refill the arena's reserve of segment descriptors: map one page
 *  from gp_machdep_arena and carve it into descriptors.  The first
 *  descriptor describes the page itself (a SPAN, kept on the
 *  reserve-span list so the page can be returned on teardown); the
 *  remaining descriptors go onto the reserve list for later use.
 *
 *  @precond  arena->m_lock acquired.
 *  @return   1 on success, 0 if the page allocation failed.
 */
static int
rtl_arena_segment_populate (
	rtl_arena_type * arena
)
{
    rtl_arena_segment_type *span;
	sal_Size                size = rtl_machdep_pagesize();

    span = rtl_machdep_alloc(gp_machdep_arena, &size);
	if (span != 0)
	{
        rtl_arena_segment_type *first, *last, *head;
		sal_Size                count = size / sizeof(rtl_arena_segment_type);

		/* insert onto reserve span list */
		QUEUE_INSERT_TAIL_NAMED(&(arena->m_segment_reserve_span_head), span, s);
		QUEUE_START_NAMED(span, f);
		span->m_addr = (sal_uIntPtr)(span);
		span->m_size = size;
		span->m_type = RTL_ARENA_SEGMENT_TYPE_SPAN;

		/* insert remaining descriptors of the page onto reserve list */
		head  = &(arena->m_segment_reserve_head);
		for (first = span + 1, last = span + count; first < last; ++first)
		{
		    QUEUE_INSERT_TAIL_NAMED(head, first, s);
			QUEUE_START_NAMED(first, f);
			first->m_addr = 0;
			first->m_size = 0;
			first->m_type = 0;
		}
	}
	return (span != 0);
}
178 
179 
/** rtl_arena_segment_get()
 *
 *  Pop a segment descriptor from the arena's reserve list,
 *  replenishing the reserve via rtl_arena_segment_populate() when
 *  it is empty.  On failure (*ppSegment) remains 0.
 *
 *  @precond  arena->m_lock acquired.
 *  @precond  (*ppSegment == 0)
 */
static RTL_MEMORY_INLINE void
rtl_arena_segment_get (
	rtl_arena_type *          arena,
	rtl_arena_segment_type ** ppSegment
)
{
    rtl_arena_segment_type * head;

    OSL_ASSERT(*ppSegment == 0);

    head = &(arena->m_segment_reserve_head);
    /* non-empty list, or populate succeeded (short-circuit) */
    if ((head->m_snext != head) || rtl_arena_segment_populate (arena))
    {
		(*ppSegment) = head->m_snext;
		QUEUE_REMOVE_NAMED((*ppSegment), s);
    }
}
202 
203 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
204 #pragma inline(rtl_arena_segment_get)
205 #endif
206 
207 
/** rtl_arena_segment_put()
 *
 *  Return a segment descriptor to the arena's reserve list and clear
 *  the caller's pointer.  The descriptor must already be unlinked
 *  from both queues.
 *
 *  @precond  arena->m_lock acquired.
 *  @postcond (*ppSegment == 0)
 */
static RTL_MEMORY_INLINE void
rtl_arena_segment_put (
	rtl_arena_type *          arena,
	rtl_arena_segment_type ** ppSegment
)
{
	rtl_arena_segment_type * head;

	OSL_ASSERT(QUEUE_STARTED_NAMED((*ppSegment), s));
	OSL_ASSERT(QUEUE_STARTED_NAMED((*ppSegment), f));

	(*ppSegment)->m_addr = 0;
	(*ppSegment)->m_size = 0;

	/* list heads must never be recycled */
	OSL_ASSERT((*ppSegment)->m_type != RTL_ARENA_SEGMENT_TYPE_HEAD);
	(*ppSegment)->m_type = 0;

	/* keep as reserve */
	head = &(arena->m_segment_reserve_head);
	QUEUE_INSERT_HEAD_NAMED(head, (*ppSegment), s);

	/* clear */
	(*ppSegment) = 0;
}
237 
238 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
239 #pragma inline(rtl_arena_segment_put)
240 #endif
241 
242 /* ================================================================= */
243 
244 /** rtl_arena_freelist_insert()
245  *
246  *  @precond arena->m_lock acquired.
247  */
248 static RTL_MEMORY_INLINE void
rtl_arena_freelist_insert(rtl_arena_type * arena,rtl_arena_segment_type * segment)249 rtl_arena_freelist_insert (
250 	rtl_arena_type *         arena,
251 	rtl_arena_segment_type * segment
252 )
253 {
254 	rtl_arena_segment_type * head;
255 
256 	head = &(arena->m_freelist_head[highbit(segment->m_size) - 1]);
257 	QUEUE_INSERT_TAIL_NAMED(head, segment, f);
258 
259 	arena->m_freelist_bitmap |= head->m_size;
260 }
261 
262 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
263 #pragma inline(rtl_arena_freelist_insert)
264 #endif /* __SUNPRO_C */
265 
266 
/** rtl_arena_freelist_remove()
 *
 *  Unlink a segment from its freelist; when it was the list's only
 *  member (both neighbours are the list head), also clear this size
 *  class's bit in the freelist bitmap.
 *
 *  @precond arena->m_lock acquired.
 */
static RTL_MEMORY_INLINE void
rtl_arena_freelist_remove (
	rtl_arena_type *         arena,
	rtl_arena_segment_type * segment
)
{
	if ((segment->m_fnext->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD) &&
		(segment->m_fprev->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD)    )
	{
		/* sole member: freelist becomes empty */
		rtl_arena_segment_type * head;

		head = segment->m_fprev;
		OSL_ASSERT(arena->m_freelist_bitmap & head->m_size);
		arena->m_freelist_bitmap ^= head->m_size;
	}
	QUEUE_REMOVE_NAMED(segment, f);
}
288 
289 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
290 #pragma inline(rtl_arena_freelist_remove)
291 #endif /* __SUNPRO_C */
292 
293 
294 /* ================================================================= */
295 
/** RTL_ARENA_HASH_INDEX()
 *  Map a segment address to a hash-table bucket: fold in
 *  higher-order address bits, drop the quantum-aligned low bits,
 *  then mask with (m_hash_size - 1); m_hash_size is a power of 2.
 */
#define	RTL_ARENA_HASH_INDEX_IMPL(a, s, q, m) \
 	((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))

#define	RTL_ARENA_HASH_INDEX(arena, addr) \
    RTL_ARENA_HASH_INDEX_IMPL((addr), (arena)->m_hash_shift, (arena)->m_quantum_shift, ((arena)->m_hash_size - 1))
303 
/** rtl_arena_hash_rescale()
 *
 *  Grow the arena's segment hash table to new_size buckets: allocate
 *  the new table unlocked, then (under the lock) swap it in and
 *  rehash every segment from the old table.  The old table is freed
 *  after the lock is released, unless it was the embedded initial
 *  table (m_hash_table_0).  On allocation failure the table is
 *  simply left unchanged.
 *
 * @precond arena->m_lock released.
 */
static void
rtl_arena_hash_rescale (
	rtl_arena_type * arena,
	sal_Size         new_size
)
{
	rtl_arena_segment_type ** new_table;
	sal_Size                  new_bytes;

	new_bytes = new_size * sizeof(rtl_arena_segment_type*);
	new_table = (rtl_arena_segment_type **)rtl_arena_alloc (gp_arena_arena, &new_bytes);

	if (new_table != 0)
	{
		rtl_arena_segment_type ** old_table;
		sal_Size                  old_size, i;

		memset (new_table, 0, new_bytes);

		RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));

		old_table = arena->m_hash_table;
		old_size  = arena->m_hash_size;

		OSL_TRACE(
			"rtl_arena_hash_rescale(\"%s\"): "
			"nseg: %"PRIu64" (ave: %"PRIu64"), frees: %"PRIu64" "
			"[old_size: %lu, new_size: %lu]",
			arena->m_name,
			arena->m_stats.m_alloc - arena->m_stats.m_free,
			(arena->m_stats.m_alloc - arena->m_stats.m_free) >> arena->m_hash_shift,
			arena->m_stats.m_free,
			old_size, new_size
		);

#if 0  /* DBG: dump per-bucket chain lengths */
		for (i = 0; i < arena->m_hash_size; i++)
		{
			sal_Size k = 0; rtl_arena_segment_type ** segpp = &(arena->m_hash_table[i]);
			while (*segpp)
			{
				k += 1;
				segpp = &((*segpp)->m_fnext);
			}
			fprintf(stdout, "%d, ", k);
		}
		fprintf(stdout, "\n");
#endif /* DBG */

		/* install new table before rehashing into it */
		arena->m_hash_table = new_table;
		arena->m_hash_size  = new_size;
		arena->m_hash_shift = highbit(arena->m_hash_size) - 1;

		/* rehash all segments from the old table */
		for (i = 0; i < old_size; i++)
		{
			rtl_arena_segment_type * curr = old_table[i];
			while (curr != 0)
			{
				rtl_arena_segment_type  * next = curr->m_fnext;
				rtl_arena_segment_type ** head;

				head = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, curr->m_addr)]);
				curr->m_fnext = (*head);
				(*head) = curr;

				curr = next;
			}
			old_table[i] = 0;
		}

		RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

		/* the embedded initial table must not be freed */
		if (old_table != arena->m_hash_table_0)
		{
			sal_Size old_bytes = old_size * sizeof(rtl_arena_segment_type*);
			rtl_arena_free (gp_arena_arena, old_table, old_bytes);
		}
	}
}
387 
388 
389 /** rtl_arena_hash_insert()
390  *  ...and update stats.
391  */
392 static RTL_MEMORY_INLINE void
rtl_arena_hash_insert(rtl_arena_type * arena,rtl_arena_segment_type * segment)393 rtl_arena_hash_insert (
394 	rtl_arena_type *         arena,
395 	rtl_arena_segment_type * segment
396 )
397 {
398 	rtl_arena_segment_type ** ppSegment;
399 
400 	ppSegment = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, segment->m_addr)]);
401 
402 	segment->m_fnext = (*ppSegment);
403 	(*ppSegment) = segment;
404 
405 	arena->m_stats.m_alloc     += 1;
406 	arena->m_stats.m_mem_alloc += segment->m_size;
407 }
408 
409 #if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
410 #pragma inline(rtl_arena_hash_insert)
411 #endif /* __SUNPRO_C */
412 
413 
/** rtl_arena_hash_remove()
 *
 *  Find and unlink the segment for 'addr' in the hash table, update
 *  free stats, and — when the lookup chain was long and the table is
 *  overloaded — trigger a hash-table rescale (temporarily dropping
 *  the arena lock; RTL_ARENA_FLAG_RESCALE guards against recursion).
 *
 *  @precond arena->m_lock acquired.
 *  @return  the removed segment, or 0 on a bad free.
 */
static rtl_arena_segment_type *
rtl_arena_hash_remove (
	rtl_arena_type * arena,
	sal_uIntPtr      addr,
	sal_Size         size
)
{
	rtl_arena_segment_type *segment, **segpp;
	sal_Size lookups = 0;

#if OSL_DEBUG_LEVEL == 0
    (void) size; /* unused */
#endif /* OSL_DEBUG_LEVEL */

	segpp = &(arena->m_hash_table[RTL_ARENA_HASH_INDEX(arena, addr)]);
	while ((segment = *segpp) != 0)
	{
		if (segment->m_addr == addr)
		{
			/* unlink from chain and self-link the 'f' queue */
			*segpp = segment->m_fnext, segment->m_fnext = segment->m_fprev = segment;
			break;
		}

		/* update lookup miss stats */
		lookups += 1;
		segpp = &(segment->m_fnext);
	}

	OSL_POSTCOND(segment != 0, "rtl_arena_hash_remove(): bad free.");
	if (segment != 0)
	{
		OSL_POSTCOND(segment->m_size == size, "rtl_arena_hash_remove(): wrong size.");

		arena->m_stats.m_free      += 1;
		arena->m_stats.m_mem_alloc -= segment->m_size;

		if (lookups > 1)
		{
			/* chains getting long: consider growing the hash table */
			sal_Size nseg = (sal_Size)(arena->m_stats.m_alloc - arena->m_stats.m_free);
			if (nseg > 4 * arena->m_hash_size)
			{
				if (!(arena->m_flags & RTL_ARENA_FLAG_RESCALE))
				{
					sal_Size ave = nseg >> arena->m_hash_shift;
					sal_Size new_size = arena->m_hash_size << (highbit(ave) - 1);

					/* rescale requires the lock released; flag blocks re-entry */
					arena->m_flags |= RTL_ARENA_FLAG_RESCALE;
					RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
					rtl_arena_hash_rescale (arena, new_size);
					RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
					arena->m_flags &= ~RTL_ARENA_FLAG_RESCALE;
				}
			}
		}
	}

	return (segment);
}
475 
476 /* ================================================================= */
477 
/** rtl_arena_segment_alloc()
 *  allocate (and remove) segment from freelist
 *
 *  Power-of-2 sizes use "instant fit": the freelist bitmap locates
 *  the lowest non-empty size class that can satisfy the request.
 *  Non-power-of-2 sizes are rounded up first; a request falling in
 *  the highest size class falls back to first-fit search there.
 *
 *  @precond arena->m_lock acquired
 *  @precond (*ppSegment == 0)
 *  @return  1 when a segment was found (removed from its freelist).
 */
static int
rtl_arena_segment_alloc (
	rtl_arena_type *          arena,
	sal_Size                  size,
	rtl_arena_segment_type ** ppSegment
)
{
	int index = 0;

	OSL_ASSERT(*ppSegment == 0);
	if (!RTL_MEMORY_ISP2(size))
	{
		int msb = highbit(size);
		if (RTL_ARENA_FREELIST_SIZE == SAL_INT_CAST(size_t, msb))
		{
			/* highest possible freelist: fall back to first fit */
			rtl_arena_segment_type *head, *segment;

			head = &(arena->m_freelist_head[msb - 1]);
			for (segment = head->m_fnext; segment != head; segment = segment->m_fnext)
			{
				if (segment->m_size >= size)
				{
					/* allocate first fit segment */
					(*ppSegment) = segment;
					break;
				}
			}
			goto dequeue_and_leave;
		}

		/* roundup to next power of 2 */
		size = (1UL << msb);
	}

	/* instant fit: lowest set bit >= size's class in the bitmap */
	index = lowbit(RTL_MEMORY_P2ALIGN(arena->m_freelist_bitmap, size));
	if (index > 0)
	{
		/* instant fit: allocate first free segment */
		rtl_arena_segment_type *head;

		head = &(arena->m_freelist_head[index - 1]);
		(*ppSegment) = head->m_fnext;
		OSL_ASSERT((*ppSegment) != head);
	}

dequeue_and_leave:
	if (*ppSegment != 0)
	{
		/* remove from freelist */
		rtl_arena_freelist_remove (arena, (*ppSegment));
	}
	return (*ppSegment != 0);
}
538 
539 
/** rtl_arena_segment_create()
 *  import new (span) segment from source arena
 *
 *  Obtains two descriptors (one for the span, one for the free
 *  segment covering it), releases the arena lock while calling the
 *  source arena's alloc function, then links span and free segment
 *  onto the segment list.  All acquired resources are released on
 *  any failure path.
 *
 *  @precond arena->m_lock acquired
 *  @precond (*ppSegment == 0)
 *  @return  1 on success, 0 otherwise.
 */
static int
rtl_arena_segment_create (
	rtl_arena_type *          arena,
	sal_Size                  size,
	rtl_arena_segment_type ** ppSegment
)
{
	OSL_ASSERT((*ppSegment) == 0);
	if (arena->m_source_alloc != 0)
	{
		rtl_arena_segment_get (arena, ppSegment);
		if (*ppSegment != 0)
		{
			rtl_arena_segment_type * span = 0;
			rtl_arena_segment_get (arena, &span);
			if (span != 0)
			{
				/* import new span from source arena */
				RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));

				span->m_size = size;
				span->m_addr = (sal_uIntPtr)(arena->m_source_alloc)(
					arena->m_source_arena, &(span->m_size));

				RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
				if (span->m_addr != 0)
				{
					/* insert onto segment list, update stats */
					span->m_type = RTL_ARENA_SEGMENT_TYPE_SPAN;
					QUEUE_INSERT_HEAD_NAMED(&(arena->m_segment_head), span, s);
					arena->m_stats.m_mem_total += span->m_size;

					/* free segment covers the whole imported span */
					(*ppSegment)->m_addr = span->m_addr;
					(*ppSegment)->m_size = span->m_size;
					(*ppSegment)->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;
					QUEUE_INSERT_HEAD_NAMED(span, (*ppSegment), s);

					/* report success */
					return (1);
				}
				rtl_arena_segment_put (arena, &span);
			}
			rtl_arena_segment_put (arena, ppSegment);
		}
	}
	return (0);
}
593 
594 
/** rtl_arena_segment_coalesce()
 *  mark as free and join with adjacent free segment(s)
 *
 *  The segment list is address-ordered, so merging with m_snext /
 *  m_sprev merges with the physically adjacent memory regions.
 *
 *  @precond arena->m_lock acquired
 *  @precond segment marked 'used'
 */
static void
rtl_arena_segment_coalesce (
	rtl_arena_type *         arena,
	rtl_arena_segment_type * segment
)
{
	rtl_arena_segment_type *next, *prev;

	/* mark segment free */
	OSL_ASSERT(segment->m_type == RTL_ARENA_SEGMENT_TYPE_USED);
	segment->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;

	/* try to merge w/ next segment */
	next = segment->m_snext;
	if (next->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
	{
		/* must be physically contiguous */
		OSL_ASSERT(segment->m_addr + segment->m_size == next->m_addr);
		segment->m_size += next->m_size;

		/* remove from freelist */
		rtl_arena_freelist_remove (arena, next);

		/* remove from segment list */
		QUEUE_REMOVE_NAMED(next, s);

		/* release segment descriptor */
		rtl_arena_segment_put (arena, &next);
	}

	/* try to merge w/ prev segment */
	prev = segment->m_sprev;
	if (prev->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
	{
		/* must be physically contiguous */
		OSL_ASSERT(prev->m_addr + prev->m_size == segment->m_addr);
		segment->m_addr  = prev->m_addr;
		segment->m_size += prev->m_size;

		/* remove from freelist */
		rtl_arena_freelist_remove (arena, prev);

		/* remove from segment list */
		QUEUE_REMOVE_NAMED(prev, s);

		/* release segment descriptor */
		rtl_arena_segment_put (arena, &prev);
	}
}
648 
649 /* ================================================================= */
650 
/** rtl_arena_constructor()
 *  Zero-initialize an arena object and set up its lock, list heads
 *  (reserve span, reserve, segment, per-class freelists) and the
 *  embedded initial hash table.
 */
static void
rtl_arena_constructor (void * obj)
{
	rtl_arena_type * arena = (rtl_arena_type*)(obj);
	rtl_arena_segment_type * head;
	size_t i;

	memset (arena, 0, sizeof(rtl_arena_type));

	QUEUE_START_NAMED(arena, arena_);

	(void) RTL_MEMORY_LOCK_INIT(&(arena->m_lock));

	head = &(arena->m_segment_reserve_span_head);
	rtl_arena_segment_constructor (head);
	head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

	head = &(arena->m_segment_reserve_head);
	rtl_arena_segment_constructor (head);
	head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

	head = &(arena->m_segment_head);
	rtl_arena_segment_constructor (head);
	head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;

	for (i = 0; i < RTL_ARENA_FREELIST_SIZE; i++)
	{
		head = &(arena->m_freelist_head[i]);
		rtl_arena_segment_constructor (head);

		/* head's m_size doubles as the bitmap bit for this class */
		head->m_size = (1UL << i);
		head->m_type = RTL_ARENA_SEGMENT_TYPE_HEAD;
	}

	/* start with the embedded hash table */
	arena->m_hash_table = arena->m_hash_table_0;
	arena->m_hash_size  = RTL_ARENA_HASH_SIZE;
	arena->m_hash_shift = highbit(arena->m_hash_size) - 1;
}
691 
692 
/** rtl_arena_destructor()
 *  Destroy the arena's lock and assert that all list heads and the
 *  hash table are back in their constructed state (the arena must
 *  already have been deactivated).
 */
static void
rtl_arena_destructor (void * obj)
{
	rtl_arena_type * arena = (rtl_arena_type*)(obj);
	rtl_arena_segment_type * head;
	size_t i;

	OSL_ASSERT(QUEUE_STARTED_NAMED(arena, arena_));

	RTL_MEMORY_LOCK_DESTROY(&(arena->m_lock));

	head = &(arena->m_segment_reserve_span_head);
	OSL_ASSERT(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
	rtl_arena_segment_destructor (head);

	head = &(arena->m_segment_reserve_head);
	OSL_ASSERT(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
	rtl_arena_segment_destructor (head);

	head = &(arena->m_segment_head);
	OSL_ASSERT(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);
	rtl_arena_segment_destructor (head);

	for (i = 0; i < RTL_ARENA_FREELIST_SIZE; i++)
	{
		head = &(arena->m_freelist_head[i]);

		OSL_ASSERT(head->m_size == (1UL << i));
		OSL_ASSERT(head->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD);

		rtl_arena_segment_destructor (head);
	}

	/* deactivation must have restored the embedded hash table */
	OSL_ASSERT(arena->m_hash_table == arena->m_hash_table_0);
	OSL_ASSERT(arena->m_hash_size  == RTL_ARENA_HASH_SIZE);
	OSL_ASSERT(
        arena->m_hash_shift ==
        SAL_INT_CAST(unsigned, highbit(arena->m_hash_size) - 1));
}
734 
735 /* ================================================================= */
736 
/** rtl_arena_activate()
 *  Configure a constructed arena (name, quantum, source arena and
 *  callbacks), create the quantum caches when qcache_max > 0, and
 *  register the arena on the global arena list.
 *  @return arena on success, 0 when quantum-cache allocation failed.
 */
static rtl_arena_type *
rtl_arena_activate (
	rtl_arena_type *   arena,
	const char *       name,
	sal_Size           quantum,
	sal_Size           quantum_cache_max,
	rtl_arena_type *   source_arena,
	void * (SAL_CALL * source_alloc)(rtl_arena_type *, sal_Size *),
	void   (SAL_CALL * source_free) (rtl_arena_type *, void *, sal_Size)
)
{
	OSL_ASSERT(arena != 0);
	if (arena != 0)
	{
		(void) snprintf (arena->m_name, sizeof(arena->m_name), "%s", name);

		if (!RTL_MEMORY_ISP2(quantum))
		{
			/* roundup to next power of 2 */
			quantum = (1UL << highbit(quantum));
		}
		quantum_cache_max = RTL_MEMORY_P2ROUNDUP(quantum_cache_max, quantum);

		arena->m_quantum = quantum;
		arena->m_quantum_shift = highbit(arena->m_quantum) - 1;
		arena->m_qcache_max = quantum_cache_max;

		arena->m_source_arena = source_arena;
		arena->m_source_alloc = source_alloc;
		arena->m_source_free  = source_free;

		if (arena->m_qcache_max > 0)
		{
			/* one rtl_cache per quantum multiple up to qcache_max */
			/* NOTE(review): this local 'name' shadows the parameter 'name';
			   the parameter is no longer needed here, but the shadowing is
			   easy to misread. */
			char name[RTL_ARENA_NAME_LENGTH + 1];
			int  i, n = (arena->m_qcache_max >> arena->m_quantum_shift);

			sal_Size size = n * sizeof(rtl_cache_type*);
			arena->m_qcache_ptr = (rtl_cache_type**)rtl_arena_alloc (gp_arena_arena, &size);
			if (!(arena->m_qcache_ptr))
			{
				/* out of memory */
				return (0);
			}
			for (i = 1; i <= n; i++)
			{
				size = i * arena->m_quantum;
				(void) snprintf (name, sizeof(name), "%s_%lu", arena->m_name, size);
				arena->m_qcache_ptr[i - 1] = rtl_cache_create(name, size, 0, NULL, NULL, NULL, NULL, arena, RTL_CACHE_FLAG_QUANTUMCACHE);
			}
		}

		/* insert into arena list */
		RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
		QUEUE_INSERT_TAIL_NAMED(&(g_arena_list.m_arena_head), arena, arena_);
		RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));
	}
	return (arena);
}
797 
/** rtl_arena_deactivate()
 *  Undo rtl_arena_activate(): unregister the arena, destroy its
 *  quantum caches, reclaim leaked segments, restore the embedded
 *  hash table, return imported spans to the source arena (via
 *  rtl_arena_segment_put on span descriptors) and give descriptor
 *  pages back to gp_machdep_arena.
 */
static void
rtl_arena_deactivate (
	rtl_arena_type * arena
)
{
	rtl_arena_segment_type * head, * segment;

	/* remove from arena list */
	RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
	QUEUE_REMOVE_NAMED(arena, arena_);
	RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));

	/* cleanup quantum cache(s) */
	if ((arena->m_qcache_max > 0) && (arena->m_qcache_ptr != 0))
	{
		int  i, n = (arena->m_qcache_max >> arena->m_quantum_shift);
		for (i = 1; i <= n; i++)
		{
			if (arena->m_qcache_ptr[i - 1] != 0)
			{
				rtl_cache_destroy (arena->m_qcache_ptr[i - 1]);
				arena->m_qcache_ptr[i - 1] = 0;
			}
		}
		rtl_arena_free (
			gp_arena_arena,
			arena->m_qcache_ptr,
			n * sizeof(rtl_cache_type*));

		arena->m_qcache_ptr = 0;
	}

	/* check for leaked segments */
	OSL_TRACE(
		"rtl_arena_deactivate(\"%s\"): "
		"allocs: %"PRIu64", frees: %"PRIu64"; total: %lu, used: %lu",
		arena->m_name,
		arena->m_stats.m_alloc, arena->m_stats.m_free,
		arena->m_stats.m_mem_total, arena->m_stats.m_mem_alloc
	);
	if (arena->m_stats.m_alloc > arena->m_stats.m_free)
	{
		sal_Size i, n;

		OSL_TRACE(
			"rtl_arena_deactivate(\"%s\"): "
			"cleaning up %"PRIu64" leaked segment(s) [%lu bytes]",
			arena->m_name,
			arena->m_stats.m_alloc - arena->m_stats.m_free,
			arena->m_stats.m_mem_alloc
		);

		/* cleanup still used segment(s) */
		for (i = 0, n = arena->m_hash_size; i < n; i++)
		{
			while ((segment = arena->m_hash_table[i]) != 0)
			{
				/* pop from hash table */
				arena->m_hash_table[i] = segment->m_fnext, segment->m_fnext = segment->m_fprev = segment;

				/* coalesce w/ adjacent free segment(s) */
				rtl_arena_segment_coalesce (arena, segment);

				/* insert onto freelist */
				rtl_arena_freelist_insert (arena, segment);
			}
		}
	}

	/* cleanup hash table (restore the embedded initial table) */
	if (arena->m_hash_table != arena->m_hash_table_0)
	{
		rtl_arena_free (
			gp_arena_arena,
			arena->m_hash_table,
			arena->m_hash_size * sizeof(rtl_arena_segment_type*));

		arena->m_hash_table = arena->m_hash_table_0;
		arena->m_hash_size  = RTL_ARENA_HASH_SIZE;
		arena->m_hash_shift = highbit(arena->m_hash_size) - 1;
	}

	/* cleanup segment list */
	head = &(arena->m_segment_head);
	for (segment = head->m_snext; segment != head; segment = head->m_snext)
	{
		if (segment->m_type == RTL_ARENA_SEGMENT_TYPE_FREE)
		{
			/* remove from freelist */
			rtl_arena_freelist_remove (arena, segment);
		}
		else
		{
			/* can have only free and span segments here */
			OSL_ASSERT(segment->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN);
		}

		/* remove from segment list */
		QUEUE_REMOVE_NAMED(segment, s);

		/* release segment descriptor */
		rtl_arena_segment_put (arena, &segment);
	}

	/* cleanup segment reserve list */
	head = &(arena->m_segment_reserve_head);
	for (segment = head->m_snext; segment != head; segment = head->m_snext)
	{
		/* remove from segment list */
		QUEUE_REMOVE_NAMED(segment, s);
	}

	/* cleanup segment reserve span(s) */
	head = &(arena->m_segment_reserve_span_head);
	for (segment = head->m_snext; segment != head; segment = head->m_snext)
	{
		/* can have only span segments here */
		OSL_ASSERT(segment->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN);

		/* remove from segment list */
		QUEUE_REMOVE_NAMED(segment, s);

		/* return span to gp_machdep_arena */
		rtl_machdep_free (gp_machdep_arena, (void*)(segment->m_addr), segment->m_size);
	}
}
926 
927 /* ================================================================= *
928  *
929  * arena implementation.
930  *
931  * ================================================================= */
932 
/** rtl_arena_create()
 *  Allocate, construct and activate a new arena.  When the module is
 *  not yet initialized (gp_arena_arena == 0), bootstrap it via
 *  rtl_arena_init() and retry the allocation once.
 *  @return the new arena, or 0 on failure.
 */
rtl_arena_type *
SAL_CALL rtl_arena_create (
	const char *       name,
	sal_Size           quantum,
	sal_Size           quantum_cache_max,
	rtl_arena_type *   source_arena,
	void * (SAL_CALL * source_alloc)(rtl_arena_type *, sal_Size *),
	void   (SAL_CALL * source_free) (rtl_arena_type *, void *, sal_Size),
	int                flags
) SAL_THROW_EXTERN_C()
{
	rtl_arena_type * result = 0;
	sal_Size         size   = sizeof(rtl_arena_type);

    (void) flags; /* unused */

try_alloc:
	result = (rtl_arena_type*)rtl_arena_alloc (gp_arena_arena, &size);
	if (result != 0)
	{
		rtl_arena_type * arena = result;
		VALGRIND_CREATE_MEMPOOL(arena, 0, 0);
		rtl_arena_constructor (arena);

		if (!source_arena)
		{
			/* default to the global default arena */
			OSL_ASSERT(gp_default_arena != 0);
			source_arena = gp_default_arena;
		}

		result = rtl_arena_activate (
			arena,
			name,
			quantum,
			quantum_cache_max,
			source_arena,
			source_alloc,
			source_free
		);

		if (result == 0)
		{
			/* activation failed: roll back construction */
			rtl_arena_deactivate (arena);
			rtl_arena_destructor (arena);
			VALGRIND_DESTROY_MEMPOOL(arena);
			rtl_arena_free (gp_arena_arena, arena, size);
		}
	}
	else if (gp_arena_arena == 0)
	{
		if (rtl_arena_init())
		{
			/* try again */
			goto try_alloc;
		}
	}
	return (result);
}
993 
994 /** rtl_arena_destroy()
995  */
996 void
rtl_arena_destroy(rtl_arena_type * arena)997 SAL_CALL rtl_arena_destroy (
998 	rtl_arena_type * arena
999 )
1000 {
1001 	if (arena != 0)
1002 	{
1003 		rtl_arena_deactivate (arena);
1004 		rtl_arena_destructor (arena);
1005 		VALGRIND_DESTROY_MEMPOOL(arena);
1006 		rtl_arena_free (gp_arena_arena, arena, sizeof(rtl_arena_type));
1007 	}
1008 }
1009 
/** rtl_arena_alloc()
 *  Allocate at least *pSize bytes from the arena; on success *pSize
 *  is updated to the actual (quantum-rounded) size.  Sizes above
 *  m_qcache_max are served from the segment list (with shrink-to-fit
 *  splitting); smaller sizes come from the per-quantum caches.
 *  @return the allocation address, or 0 on failure.
 */
void *
SAL_CALL rtl_arena_alloc (
	rtl_arena_type * arena,
	sal_Size *       pSize
) SAL_THROW_EXTERN_C()
{
	void * addr = 0;

	if ((arena != 0) && (pSize != 0))
	{
		sal_Size size = RTL_MEMORY_ALIGN((*pSize), arena->m_quantum);
		if (size > arena->m_qcache_max)
		{
			/* allocate from segment list */
			rtl_arena_segment_type *segment = 0;

			RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
			/* freelist first, then import from the source arena */
			if (rtl_arena_segment_alloc (arena, size, &segment) ||
				rtl_arena_segment_create(arena, size, &segment)    )
			{
				/* shrink to fit */
				sal_Size oversize;

				/* mark segment used */
				OSL_ASSERT(segment->m_type == RTL_ARENA_SEGMENT_TYPE_FREE);
				segment->m_type = RTL_ARENA_SEGMENT_TYPE_USED;

				/* resize: split off the remainder when it is large enough */
				OSL_ASSERT(segment->m_size >= size);
				oversize = segment->m_size - size;
				if (oversize >= SAL_MAX(arena->m_quantum, arena->m_qcache_max))
				{
					rtl_arena_segment_type * remainder = 0;
					rtl_arena_segment_get (arena, &remainder);
					if (remainder != 0)
					{
						segment->m_size = size;

						remainder->m_addr = segment->m_addr + segment->m_size;
						remainder->m_size = oversize;
						remainder->m_type = RTL_ARENA_SEGMENT_TYPE_FREE;
						QUEUE_INSERT_HEAD_NAMED(segment, remainder, s);

						rtl_arena_freelist_insert (arena, remainder);
					}
				}

				rtl_arena_hash_insert (arena, segment);

                /* DEBUG ONLY: mark allocated, undefined */
				OSL_DEBUG_ONLY(memset((void*)(segment->m_addr), 0x77777777, segment->m_size));
				VALGRIND_MEMPOOL_ALLOC(arena, segment->m_addr, segment->m_size);

				(*pSize) = segment->m_size;
				addr = (void*)(segment->m_addr);
			}
			RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
		}
		else if (size > 0)
		{
			/* allocate from quantum cache(s) */
			int index = (size >> arena->m_quantum_shift) - 1;
			OSL_ASSERT (arena->m_qcache_ptr[index] != 0);

			addr = rtl_cache_alloc (arena->m_qcache_ptr[index]);
			if (addr != 0)
				(*pSize) = size;
		}
	}
	return (addr);
}
1083 
1084 /** rtl_arena_free()
1085  */
1086 void
rtl_arena_free(rtl_arena_type * arena,void * addr,sal_Size size)1087 SAL_CALL rtl_arena_free (
1088 	rtl_arena_type * arena,
1089 	void *           addr,
1090 	sal_Size         size
1091 ) SAL_THROW_EXTERN_C()
1092 {
1093 	if (arena != 0)
1094 	{
1095 		size = RTL_MEMORY_ALIGN(size, arena->m_quantum);
1096 		if (size > arena->m_qcache_max)
1097 		{
1098 			/* free to segment list */
1099 			rtl_arena_segment_type * segment;
1100 
1101 			RTL_MEMORY_LOCK_ACQUIRE(&(arena->m_lock));
1102 
1103 			segment = rtl_arena_hash_remove (arena, (sal_uIntPtr)(addr), size);
1104 			if (segment != 0)
1105 			{
1106 				rtl_arena_segment_type *next, *prev;
1107 
1108 				/* DEBUG ONLY: mark unallocated, undefined */
1109 				VALGRIND_MEMPOOL_FREE(arena, segment->m_addr);
1110                 /* OSL_DEBUG_ONLY() */ VALGRIND_MAKE_MEM_UNDEFINED(segment->m_addr, segment->m_size);
1111                 OSL_DEBUG_ONLY(memset((void*)(segment->m_addr), 0x33333333, segment->m_size));
1112 
1113 				/* coalesce w/ adjacent free segment(s) */
1114 				rtl_arena_segment_coalesce (arena, segment);
1115 
1116 				/* determine (new) next and prev segment */
1117 				next = segment->m_snext, prev = segment->m_sprev;
1118 
1119 				/* entire span free when prev is a span, and next is either a span or a list head */
1120 				if (((prev->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN)) &&
1121 					((next->m_type == RTL_ARENA_SEGMENT_TYPE_SPAN)  ||
1122 					 (next->m_type == RTL_ARENA_SEGMENT_TYPE_HEAD))    )
1123 				{
1124 					OSL_ASSERT((prev->m_addr == segment->m_addr) &&
1125 							   (prev->m_size == segment->m_size)    );
1126 
1127 					if (arena->m_source_free)
1128 					{
1129 						addr = (void*)(prev->m_addr);
1130 						size = prev->m_size;
1131 
1132 						/* remove from segment list */
1133 						QUEUE_REMOVE_NAMED(segment, s);
1134 
1135 						/* release segment descriptor */
1136 						rtl_arena_segment_put (arena, &segment);
1137 
1138 						/* remove from segment list */
1139 						QUEUE_REMOVE_NAMED(prev, s);
1140 
1141 						/* release (span) segment descriptor */
1142 						rtl_arena_segment_put (arena, &prev);
1143 
1144 						/* update stats, return span to source arena */
1145 						arena->m_stats.m_mem_total -= size;
1146 						RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
1147 
1148 						(arena->m_source_free)(arena->m_source_arena, addr, size);
1149 						return;
1150 					}
1151 				}
1152 
1153 				/* insert onto freelist */
1154 				rtl_arena_freelist_insert (arena, segment);
1155 			}
1156 
1157 			RTL_MEMORY_LOCK_RELEASE(&(arena->m_lock));
1158 		}
1159 		else if (size > 0)
1160 		{
1161 			/* free to quantum cache(s) */
1162 			int index = (size >> arena->m_quantum_shift) - 1;
1163 			OSL_ASSERT (arena->m_qcache_ptr[index] != 0);
1164 
1165 			rtl_cache_free (arena->m_qcache_ptr[index], addr);
1166 		}
1167 	}
1168 }
1169 
1170 /* ================================================================= *
1171  *
1172  * machdep internals.
1173  *
1174  * ================================================================= */
1175 
1176 #if defined(SAL_UNX)
1177 #include <sys/mman.h>
1178 #elif defined(SAL_W32) || defined(SAL_OS2)
1179 #define MAP_FAILED 0
1180 #endif /* SAL_UNX || SAL_W32 */
1181 
/** rtl_machdep_alloc()
 *  Allocate a span of memory directly from the operating system.
 *
 *  @param pArena  must be gp_machdep_arena (asserted); its statistics are
 *                 updated on success.
 *  @param pSize   [in/out] requested size in bytes; rounded up to the
 *                 platform allocation granularity and updated on success.
 *  @return address of the span on success, NULL on failure.
 */
static void *
SAL_CALL rtl_machdep_alloc (
	rtl_arena_type * pArena,
	sal_Size *       pSize
)
{
	void *   addr;
	sal_Size size = (*pSize);

	OSL_PRECOND(pArena == gp_machdep_arena, "rtl_machdep_alloc(): invalid argument");

#if defined(SOLARIS) && defined(SPARC)
	/* see @ mmap(2) man pages */
	size += (pArena->m_quantum + pArena->m_quantum); /* "red-zone" pages */
	if (size > (4 << 20))
		size = RTL_MEMORY_P2ROUNDUP(size, (4 << 20));
	else if (size > (512 << 10))
		size = RTL_MEMORY_P2ROUNDUP(size, (512 << 10));
	else
		size = RTL_MEMORY_P2ROUNDUP(size, (64 << 10));
	size -= (pArena->m_quantum + pArena->m_quantum); /* "red-zone" pages */
#else
	/* default allocation granularity: at least 64KB or one quantum (page) */
	size = RTL_MEMORY_P2ROUNDUP(size, SAL_MAX(pArena->m_quantum, 64 << 10));
#endif

#if defined(SAL_UNX)
	addr = mmap (NULL, (size_t)(size), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#elif defined(SAL_W32)
	addr = VirtualAlloc (NULL, (SIZE_T)(size), MEM_COMMIT, PAGE_READWRITE);
#elif defined(SAL_OS2)
	/* Use valloc() to use libc 16MB chunks when allocating high memory, to reduce
	   virtual address fragmentation. */
	addr = valloc( size);
#endif /* (SAL_UNX || SAL_W32 || SAL_OS2) */

	/* on W32/OS2, MAP_FAILED is #defined to 0 above, so this also
	   catches VirtualAlloc()/valloc() failure */
	if (addr != MAP_FAILED)
	{
		pArena->m_stats.m_alloc += 1;
		pArena->m_stats.m_mem_total += size;
		pArena->m_stats.m_mem_alloc += size;

		/* report the (rounded-up) size actually obtained */
		(*pSize) = size;
		return (addr);
	}
	return (NULL);
}
1231 
/** rtl_machdep_free()
 *  Return a span of memory (obtained via rtl_machdep_alloc()) to the
 *  operating system.
 *
 *  @param pArena  must be gp_machdep_arena (asserted); statistics updated.
 *  @param pAddr   address of the span, as returned by rtl_machdep_alloc().
 *  @param nSize   size of the span, as reported by rtl_machdep_alloc().
 */
static void
SAL_CALL rtl_machdep_free (
	rtl_arena_type * pArena,
	void *           pAddr,
	sal_Size         nSize
)
{
	OSL_PRECOND(pArena == gp_machdep_arena, "rtl_machdep_free(): invalid argument");

	pArena->m_stats.m_free += 1;
	pArena->m_stats.m_mem_total -= nSize;
	pArena->m_stats.m_mem_alloc -= nSize;

#if defined(SAL_UNX)
	(void) munmap(pAddr, nSize);
#elif defined(SAL_W32)
	/* MEM_RELEASE requires size 0; the whole reservation is released */
	(void) VirtualFree ((LPVOID)(pAddr), (SIZE_T)(0), MEM_RELEASE);
#elif defined(SAL_OS2)
	free(pAddr);
#endif /* (SAL_UNX || SAL_W32 || SAL_OS2) */
}
1255 
/** rtl_machdep_pagesize()
 *  Query the virtual-memory page size of the underlying platform;
 *  used as the quantum of the machdep (pseudo) arena.
 *  @return page size in bytes.
 */
static sal_Size
rtl_machdep_pagesize (void)
{
#if defined(SAL_UNX)
#if defined(FREEBSD) || defined(NETBSD)
	return ((sal_Size)getpagesize());
#else  /* POSIX */
	return ((sal_Size)sysconf(_SC_PAGESIZE));
#endif /* xBSD || POSIX */
#elif defined(SAL_W32)
	SYSTEM_INFO info;
	GetSystemInfo (&info);
	return ((sal_Size)(info.dwPageSize));
#elif defined(SAL_OS2)
	ULONG ulPageSize;
	DosQuerySysInfo(QSV_PAGE_SIZE, QSV_PAGE_SIZE, &ulPageSize, sizeof(ULONG));
	return ((sal_Size)ulPageSize);
#endif /* (SAL_UNX || SAL_W32 || SAL_OS2) */
}
1277 
1278 /* ================================================================= *
1279  *
1280  * arena initialization.
1281  *
1282  * ================================================================= */
1283 
/* One-time module initialization; invoked via SAL_ONCE from
 * rtl_arena_init().  The three arenas are created in dependency order:
 * machdep (OS pages) -> default (sourced from machdep) -> internal
 * (sourced from default); each block below relies on the previous one. */
static void
rtl_arena_once_init (void)
{
	{
		/* list of arenas */
		RTL_MEMORY_LOCK_INIT(&(g_arena_list.m_lock));
		rtl_arena_constructor (&(g_arena_list.m_arena_head));
	}
	{
		/* machdep (pseudo) arena: allocates directly from the OS,
		   quantum = page size, no source arena */
		static rtl_arena_type g_machdep_arena;

		OSL_ASSERT(gp_machdep_arena == 0);
		VALGRIND_CREATE_MEMPOOL(&g_machdep_arena, 0, 0);
		rtl_arena_constructor (&g_machdep_arena);

		gp_machdep_arena = rtl_arena_activate (
			&g_machdep_arena,
			"rtl_machdep_arena",
			rtl_machdep_pagesize(),
			0,       /* no quantum caching */
			0, 0, 0  /* no source */
		);
		OSL_ASSERT(gp_machdep_arena != 0);
	}
	{
		/* default arena: page-quantum arena sourced from the machdep
		   arena via rtl_machdep_alloc()/rtl_machdep_free() */
		static rtl_arena_type g_default_arena;

		OSL_ASSERT(gp_default_arena == 0);
		VALGRIND_CREATE_MEMPOOL(&g_default_arena, 0, 0);
		rtl_arena_constructor (&g_default_arena);

		gp_default_arena = rtl_arena_activate (
			&g_default_arena,
			"rtl_default_arena",
			rtl_machdep_pagesize(),
			0,                 /* no quantum caching */
			gp_machdep_arena,  /* source */
			rtl_machdep_alloc,
			rtl_machdep_free
		);
		OSL_ASSERT(gp_default_arena != 0);
	}
	{
		/* arena internal arena: fine-grained (64-byte quantum) arena
		   used for rtl_arena_type allocations and hash-table resizing */
		static rtl_arena_type g_arena_arena;

		OSL_ASSERT(gp_arena_arena == 0);
		VALGRIND_CREATE_MEMPOOL(&g_arena_arena, 0, 0);
		rtl_arena_constructor (&g_arena_arena);

		gp_arena_arena = rtl_arena_activate (
			&g_arena_arena,
			"rtl_arena_internal_arena",
			64,                /* quantum */
			0,                 /* no quantum caching */
			gp_default_arena,  /* source */
			rtl_arena_alloc,
			rtl_arena_free
		);
		OSL_ASSERT(gp_arena_arena != 0);
	}
}
1348 
1349 static int
rtl_arena_init(void)1350 rtl_arena_init (void)
1351 {
1352 	static sal_once_type g_once = SAL_ONCE_INIT;
1353 	SAL_ONCE(&g_once, rtl_arena_once_init);
1354 	return (gp_arena_arena != 0);
1355 }
1356 
1357 /* ================================================================= */
1358 
1359 /*
1360   Issue http://udk.openoffice.org/issues/show_bug.cgi?id=92388
1361 
1362   Mac OS X does not seem to support "__cxa__atexit", thus leading
1363   to the situation that "__attribute__((destructor))__" functions
1364   (in particular "rtl_{memory|cache|arena}_fini") become called
1365   _before_ global C++ object d'tors.
1366 
1367   Delegated the call to "rtl_arena_fini()" into a dummy C++ object,
1368   see alloc_fini.cxx .
1369 */
1370 #if defined(__GNUC__) && !defined(MACOSX)
1371 static void rtl_arena_fini (void) __attribute__((destructor));
1372 #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
1373 #pragma fini(rtl_arena_fini)
1374 static void rtl_arena_fini (void);
1375 #endif /* __GNUC__ || __SUNPRO_C */
1376 
1377 void
rtl_arena_fini(void)1378 rtl_arena_fini (void)
1379 {
1380 	if (gp_arena_arena != 0)
1381 	{
1382 		rtl_arena_type * arena, * head;
1383 
1384 		RTL_MEMORY_LOCK_ACQUIRE(&(g_arena_list.m_lock));
1385 		head = &(g_arena_list.m_arena_head);
1386 
1387 		for (arena = head->m_arena_next; arena != head; arena = arena->m_arena_next)
1388 		{
1389 			OSL_TRACE(
1390 				"rtl_arena_fini(\"%s\"): "
1391 				"allocs: %"PRIu64", frees: %"PRIu64"; total: %lu, used: %lu",
1392 				arena->m_name,
1393 				arena->m_stats.m_alloc, arena->m_stats.m_free,
1394 				arena->m_stats.m_mem_total, arena->m_stats.m_mem_alloc
1395 			);
1396 		}
1397 		RTL_MEMORY_LOCK_RELEASE(&(g_arena_list.m_lock));
1398 	}
1399 }
1400 
1401 /* ================================================================= */
1402