Perl 5 - perl.git/blob - malloc.c
/* malloc.c
 *
 */

/*
 * 'The Chamber of Records,' said Gimli.  'I guess that is where we now stand.'
 *
 *     [p.321 of _The Lord of the Rings_, II/v: "The Bridge of Khazad-Dûm"]
 */
/* This file contains Perl's own implementation of the malloc library.
 * It is used if Configure decides that, on your platform, Perl's
 * version is better than the OS's, or if you give Configure the
 * -Dusemymalloc command-line option.
 */
/*
  Here are some notes on configuring Perl's malloc.

  There are two macros which serve as bulk disablers of advanced
  features of this malloc: NO_FANCY_MALLOC, PLAIN_MALLOC (undef by
  default).  Look in the list of default values below to understand
  their exact effect.  Defining NO_FANCY_MALLOC returns malloc.c to the
  state of the malloc in Perl 5.004.  Additionally defining PLAIN_MALLOC
  returns it to the state as of Perl 5.000.

  Note that some of the settings below may be ignored in the code based
  on values of other macros.  The PERL_CORE symbol is only defined when
  perl itself is being compiled (so malloc can make some assumptions
  about perl's facilities being available to it).

  Each config option has a short description, followed by its name,
  default value, and a comment about the default (if applicable).  Some
  options take a precise value, while the others are just boolean.
  The boolean ones are listed first.

    # Read configuration settings from malloc_cfg.h
    HAVE_MALLOC_CFG_H           undef

    # Enable code for an emergency memory pool in $^M.  See perlvar.pod
    # for a description of $^M.
    PERL_EMERGENCY_SBRK         !PLAIN_MALLOC

    # Enable code for printing memory statistics.
    DEBUGGING_MSTATS            !PLAIN_MALLOC

    # Move allocation info for small buckets into separate areas.
    # Memory optimization (especially for small allocations of
    # less than 64 bytes).  Since perl usually makes a large number
    # of small allocations, this is usually a win.
    PACK_MALLOC                 (!PLAIN_MALLOC && !RCHECK)

    # Add one page to big powers of two when calculating bucket size.
    # This is targeted at big allocations, as are common in image
    # processing.
    TWO_POT_OPTIMIZE            !PLAIN_MALLOC

    # Use intermediate bucket sizes between powers-of-two.  This is
    # generally a memory optimization, and a (small) speed pessimization.
    BUCKETS_ROOT2               !NO_FANCY_MALLOC

    # Do not check small deallocations for bad free().  Memory
    # and speed optimization, error reporting pessimization.
    IGNORE_SMALL_BAD_FREE       (!NO_FANCY_MALLOC && !RCHECK)

    # Use table lookup to decide in which bucket a given allocation will go.
    SMALL_BUCKET_VIA_TABLE      !NO_FANCY_MALLOC

    # Use a perl-defined sbrk() instead of the (presumably broken or
    # missing) system-supplied sbrk().
    USE_PERL_SBRK               undef

    # Use system malloc() (or calloc() etc.) to emulate sbrk().  Normally
    # only used with broken sbrk()s.
    PERL_SBRK_VIA_MALLOC        undef

    # Which allocator to use if PERL_SBRK_VIA_MALLOC
    SYSTEM_ALLOC(a)             malloc(a)

    # Minimal alignment (in bytes, should be a power of 2) of SYSTEM_ALLOC
    SYSTEM_ALLOC_ALIGNMENT      MEM_ALIGNBYTES

    # Disable memory overwrite checking with DEBUGGING.  Memory and speed
    # optimization, error reporting pessimization.
    NO_RCHECK                   undef

    # Enable memory overwrite checking with DEBUGGING.  Memory and speed
    # pessimization, error reporting optimization.
    RCHECK                      (DEBUGGING && !NO_RCHECK)

    # Do not overwrite uninit areas with DEBUGGING.  Speed
    # optimization, error reporting pessimization.
    NO_MFILL                    undef

    # Overwrite uninit areas with DEBUGGING.  Speed
    # pessimization, error reporting optimization.
    MALLOC_FILL                 (DEBUGGING && !NO_RCHECK && !NO_MFILL)

    # Do not check overwritten uninit areas with DEBUGGING.  Speed
    # optimization, error reporting pessimization.
    NO_FILL_CHECK               undef

    # Check overwritten uninit areas with DEBUGGING.  Speed
    # pessimization, error reporting optimization.
    MALLOC_FILL_CHECK           (DEBUGGING && !NO_RCHECK && !NO_FILL_CHECK)

    # Failed allocations bigger than this size croak (if
    # PERL_EMERGENCY_SBRK is enabled) without touching $^M.  See
    # perlvar.pod for a description of $^M.
    BIG_SIZE                    (1<<16)        # 64K

    # Starting from this power of two, add an extra page to the
    # size of the bucket.  This enables optimized allocations of sizes
    # close to powers of 2.  Note that the value is indexed at 0.
    FIRST_BIG_POW2              15             # 32K, 16K is used too often

    # Estimate of minimal memory footprint.  malloc uses this value to
    # request the most reasonable largest blocks of memory from the system.
    FIRST_SBRK                  (48*1024)

    # Round up sbrk()s to multiples of this.
    MIN_SBRK                    2048

    # Round up sbrk()s to multiples of this percent of footprint.
    MIN_SBRK_FRAC               3

    # Round up sbrk()s to multiples of this multiple of 1/1000 of footprint.
    MIN_SBRK_FRAC1000           (10 * MIN_SBRK_FRAC)

    # Add this much memory to big powers of two to get the bucket size.
    PERL_PAGESIZE               4096

    # This many sbrk() discontinuities should be tolerated even
    # from the start without deciding that sbrk() is usually
    # discontinuous.
    SBRK_ALLOW_FAILURES         3

    # This many continuous sbrk()s compensate for one discontinuous one.
    SBRK_FAILURE_PRICE          50

    # Some configurations may ask for 12-byte-or-so allocations which
    # require 8-byte alignment (?!).  In such a situation one needs to
    # define this to disable the 12-byte bucket (which will increase the
    # memory footprint).
    STRICT_ALIGNMENT            undef

    # Do not allow configuration of runtime options at runtime
    NO_MALLOC_DYNAMIC_CFG       undef

    # Do not allow configuration of runtime options via $ENV{PERL_MALLOC_OPT}
    NO_PERL_MALLOC_ENV          undef

        [The variable consists of ;-separated parts of the form CODE=VALUE
         with 1-character codes F, M, f, A, P, G, d, a, c for runtime
         configuration of FIRST_SBRK, MIN_SBRK, MIN_SBRK_FRAC1000,
         SBRK_ALLOW_FAILURES, SBRK_FAILURE_PRICE, sbrk_goodness,
         filldead, fillalive, fillcheck.  The last three are for DEBUGGING
         builds, and allow toggling the checks for reads of free()ed
         memory, reads of uninit memory, and writes to free()ed memory.]

  This implementation assumes that calling PerlIO_printf() does not
  result in any memory allocation calls (used during a panic).

 */
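/* [Illustrative example -- not part of the original source.]  To make the
 * PERL_MALLOC_OPT format above concrete: with the one-character codes
 * indexing "FMfAPGdac", a hypothetical setting of
 *
 *      PERL_MALLOC_OPT='F=65536;M=4096;f=50'
 *
 * would set FIRST_SBRK to 64K, MIN_SBRK to 4K, and MIN_SBRK_FRAC1000 to
 * 50 (i.e. 5% of the current footprint).  The parse loop itself lives
 * in morecore() below.
 */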
#ifdef HAVE_MALLOC_CFG_H
#  include "malloc_cfg.h"
#endif

#ifndef NO_FANCY_MALLOC
#  ifndef SMALL_BUCKET_VIA_TABLE
#    define SMALL_BUCKET_VIA_TABLE
#  endif
#  ifndef BUCKETS_ROOT2
#    define BUCKETS_ROOT2
#  endif
#  ifndef IGNORE_SMALL_BAD_FREE
#    define IGNORE_SMALL_BAD_FREE
#  endif
#endif

#ifndef PLAIN_MALLOC                    /* Bulk enable features */
#  ifndef PACK_MALLOC
#    define PACK_MALLOC
#  endif
#  ifndef TWO_POT_OPTIMIZE
#    define TWO_POT_OPTIMIZE
#  endif
#  ifndef PERL_EMERGENCY_SBRK
#    define PERL_EMERGENCY_SBRK
#  endif
#  ifndef DEBUGGING_MSTATS
#    define DEBUGGING_MSTATS
#  endif
#endif

#define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
#define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)

#define LOG_OF_MIN_ARENA 11

#if defined(DEBUGGING) && !defined(NO_RCHECK)
#  define RCHECK
#endif
#if defined(DEBUGGING) && !defined(NO_RCHECK) && !defined(NO_MFILL) && !defined(MALLOC_FILL)
#  define MALLOC_FILL
#endif
#if defined(DEBUGGING) && !defined(NO_RCHECK) && !defined(NO_FILL_CHECK) && !defined(MALLOC_FILL_CHECK)
#  define MALLOC_FILL_CHECK
#endif
#if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
#  undef IGNORE_SMALL_BAD_FREE
#endif
/*
 * malloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
 * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
 * This is designed for use in a program that uses vast quantities of memory,
 * but bombs when it runs out.
 *
 * Modifications Copyright Ilya Zakharevich 1996-99.
 *
 * Still very quick, but much more thrifty.  (Std config is 10% slower
 * than it was, and takes 67% of old heap size for typical usage.)
 *
 * Allocations of small blocks are now table-driven to many different
 * buckets.  Sizes of really big buckets are increased to accommodate
 * common size=power-of-2 blocks.  Running-out-of-memory is made into
 * an exception.  Deeply configurable and thread-safe.
 *
 */
#include "EXTERN.h"
#define PERL_IN_MALLOC_C
#include "perl.h"
#if defined(PERL_IMPLICIT_CONTEXT)
#  define croak   Perl_croak_nocontext
#  define croak2  Perl_croak_nocontext
#  define warn    Perl_warn_nocontext
#  define warn2   Perl_warn_nocontext
#else
#  define croak2  croak
#  define warn2   warn
#endif
#ifdef USE_ITHREADS
#  define PERL_MAYBE_ALIVE  PL_thr_key
#else
#  define PERL_MAYBE_ALIVE  1
#endif

#ifndef MYMALLOC
#  error "MYMALLOC is not defined"
#endif

#ifndef MUTEX_LOCK
#  define MUTEX_LOCK(l)
#endif

#ifndef MUTEX_UNLOCK
#  define MUTEX_UNLOCK(l)
#endif

#ifndef MALLOC_LOCK
#  define MALLOC_LOCK    MUTEX_LOCK(&PL_malloc_mutex)
#endif

#ifndef MALLOC_UNLOCK
#  define MALLOC_UNLOCK  MUTEX_UNLOCK(&PL_malloc_mutex)
#endif

#ifndef fatalcroak                              /* make depend */
#  define fatalcroak(mess)  (write(2, (mess), strlen(mess)), exit(2))
#endif

#ifdef DEBUGGING
#  undef DEBUG_m
#  define DEBUG_m(a)                                            \
    STMT_START {                                                \
        if (PERL_MAYBE_ALIVE && PERL_GET_THX) {                 \
            dTHX;                                               \
            if (DEBUG_m_TEST) {                                 \
                PL_debug &= ~DEBUG_m_FLAG;                      \
                a;                                              \
                PL_debug |= DEBUG_m_FLAG;                       \
            }                                                   \
        }                                                       \
    } STMT_END
#endif

#ifdef PERL_IMPLICIT_CONTEXT
#  define PERL_IS_ALIVE  aTHX
#else
#  define PERL_IS_ALIVE  TRUE
#endif
/*
 * Layout of memory:
 * ~~~~~~~~~~~~~~~~
 * The memory is broken into "blocks" which occupy multiples of 2K (and
 * generally speaking, have size "close" to a power of 2).  The addresses
 * of such *unused* blocks are kept in nextf[i] with big enough i.  (nextf
 * is an array of linked lists.)  (Addresses of used blocks are not known.)
 *
 * Moreover, since the algorithm may try to "bite" smaller blocks out
 * of unused bigger ones, there are also regions of "irregular" size,
 * managed separately, by a linked list chunk_chain.
 *
 * The third type of storage is the sbrk()ed-but-not-yet-used space, whose
 * end and size are kept in last_sbrk_top and sbrked_remains.
 *
 * Growing blocks "in place":
 * ~~~~~~~~~~~~~~~~~~~~~~~~~
 * The address of the block with the greatest address is kept in last_op
 * (if not known, last_op is 0).  If it is known that the memory above
 * last_op is not continuous, or contains a chunk from chunk_chain,
 * last_op is set to 0.
 *
 * The chunk with address last_op may be grown by expanding into
 * sbrk()ed-but-not-yet-used space, or by trying to sbrk() more continuous
 * memory.
 *
 * Management of last_op:
 * ~~~~~~~~~~~~~~~~~~~~~
 *
 * free() never changes the boundaries of blocks, so is not relevant.
 *
 * The only way realloc() may change the boundaries of blocks is if it
 * grows a block "in place".  However, in the case of success such a
 * chunk is automatically last_op, and it remains last_op.  In the case
 * of failure getpages_adjacent() clears last_op.
 *
 * malloc() may change blocks by calling morecore() only.
 *
 * morecore() may create new blocks by:
 *   a) biting pieces from chunk_chain (cannot create one above last_op);
 *   b) biting a piece from an unused block (if the block was last_op, this
 *      may create a chunk from the chain above last_op, thus last_op is
 *      invalidated in such a case);
 *   c) biting off sbrk()ed-but-not-yet-used space.  This creates
 *      a block which is last_op;
 *   d) allocating new pages by calling getpages().
 *
 * getpages() creates a new block.  It marks last_op at the bottom of
 * the chunk of memory it returns.
 *
 * Active pages footprint:
 * ~~~~~~~~~~~~~~~~~~~~~~
 * Note that we do not need to traverse the lists in nextf[i], just take
 * the first element of this list.  However, we *need* to traverse the
 * list in chunk_chain, but most of the time it should be a very short
 * one, so we do not step on a lot of pages we are not going to use.
 *
 * Flaws:
 * ~~~~~
 * get_from_bigger_buckets(): we forget to increment price, which makes
 * it quite aggressive.
 */
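/* [Illustrative sketch -- not part of the original source.]  On the fast
 * path the structures above reduce to a singly-linked-list pop; this is
 * essentially what Perl_malloc() does below (under MALLOC_LOCK):
 */
#if 0
static union overhead *
example_freelist_pop(int bucket)
{
    union overhead *p = nextf[bucket];  /* head of this size's free list */
    if (p != NULL)
        nextf[bucket] = p->ov_next;     /* unlink; block is now "in use" */
    return p;
}
#endif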
/* I don't much care whether these are defined in sys/types.h--LAW */

#define u_char unsigned char
#define u_int unsigned int
/*
 * I removed the definition of u_bigint which appeared to be u_bigint = UV
 * u_bigint was only used in TWOK_MASKED and TWOK_SHIFT
 * where I have used PTR2UV.  RMB
 */
#define u_short unsigned short

#if defined(RCHECK) && defined(PACK_MALLOC)
#  undef PACK_MALLOC
#endif
/*
 * The description below is applicable if PACK_MALLOC is not defined.
 *
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled and the size of the block fits
 * in two bytes, then the top two bytes hold the size of the requested block
 * plus the range checking words, and the header word MINUS ONE.
 */
union overhead {
    union overhead *ov_next;        /* when free */
#if MEM_ALIGNBYTES > 4
    double strut;                   /* alignment problems */
#  if MEM_ALIGNBYTES > 8
    char sstrut[MEM_ALIGNBYTES];    /* for the sizing */
#  endif
#endif
    struct {
        /*
         * Keep the ovu_index and ovu_magic in this order, having a char
         * field first gives alignment indigestion in some systems, such as
         * MachTen.
         */
        u_char ovu_index;           /* bucket # */
        u_char ovu_magic;           /* magic number */
#ifdef RCHECK
        /* Subtract one to fit into u_short for an extra bucket */
        u_short ovu_size;           /* block size (requested + overhead - 1) */
        u_int ovu_rmagic;           /* range magic number */
#endif
    } ovu;
#define ov_magic    ovu.ovu_magic
#define ov_index    ovu.ovu_index
#define ov_size     ovu.ovu_size
#define ov_rmagic   ovu.ovu_rmagic
};
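/* [Illustrative sketch -- not part of the original source.]  Without
 * PACK_MALLOC, CHUNK_SHIFT is 1 (see below), i.e. the overhead union
 * sits immediately before the memory handed to the caller, and the two
 * pointers convert back and forth like this (mirroring the
 * `p + CHUNK_SHIFT' arithmetic in Perl_malloc() below):
 */
#if 0
static Malloc_t
user_from_header(union overhead *ovp)   /* what Perl_malloc() returns */
{
    return (Malloc_t)(ovp + 1);         /* data starts past the header */
}

static union overhead *
header_from_user(Malloc_t user)         /* recover the header on free */
{
    return (union overhead *)user - 1;
}
#endif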
#define MAGIC       0xff            /* magic # on accounting info */
#define RMAGIC      0x55555555      /* magic # on range info */
#define RMAGIC_C    0x55            /* magic # on range info */

#ifdef RCHECK
#  define RMAGIC_SZ sizeof (u_int)  /* Overhead at end of bucket */
#  ifdef TWO_POT_OPTIMIZE
#    define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2) /* size-1 fits in short */
#  else
#    define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
#  endif
#else
#  define RMAGIC_SZ 0
#endif

#if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
#  undef BUCKETS_ROOT2
#endif

#ifdef BUCKETS_ROOT2
#  define BUCKET_TABLE_SHIFT 2
#  define BUCKET_POW2_SHIFT 1
#  define BUCKETS_PER_POW2 2
#else
#  define BUCKET_TABLE_SHIFT MIN_BUC_POW2
#  define BUCKET_POW2_SHIFT 0
#  define BUCKETS_PER_POW2 1
#endif
#if !defined(MEM_ALIGNBYTES) || ((MEM_ALIGNBYTES > 4) && !defined(STRICT_ALIGNMENT))
/* Figure out the alignment of void*. */
struct aligner {
    char c;
    void *p;
};
#  define ALIGN_SMALL ((IV)((caddr_t)&(((struct aligner*)0)->p)))
#else
#  define ALIGN_SMALL MEM_ALIGNBYTES
#endif

#define IF_ALIGN_8(yes,no)  ((ALIGN_SMALL>4) ? (yes) : (no))

#ifdef BUCKETS_ROOT2
#  define MAX_BUCKET_BY_TABLE 13
static const u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
  {
      0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
  };
#  define BUCKET_SIZE_NO_SURPLUS(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
#  define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE               \
                               ? buck_size[i]                           \
                               : ((1 << ((i) >> BUCKET_POW2_SHIFT))     \
                                  - MEM_OVERHEAD(i)                     \
                                  + POW2_OPTIMIZE_SURPLUS(i)))
#else
#  define BUCKET_SIZE_NO_SURPLUS(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
#  define BUCKET_SIZE(i) (BUCKET_SIZE_NO_SURPLUS(i) + POW2_OPTIMIZE_SURPLUS(i))
#  define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i))
#endif
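/* [Worked example -- not part of the original source.]  Without
 * BUCKETS_ROOT2 (BUCKET_POW2_SHIFT == 0, BUCKETS_PER_POW2 == 1), bucket i
 * simply holds blocks of 2^i bytes: BUCKET_SIZE_NO_SURPLUS(6) == 64.
 * BUCKET_SIZE_REAL() subtracts the per-block overhead, so with a
 * hypothetical MEM_OVERHEAD() of 8 bytes a bucket-6 block serves
 * requests of up to 56 bytes; anything larger goes to bucket 7.
 */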
#ifdef PACK_MALLOC
/* In this case there are several possible layouts of arenas depending
 * on the size.  Arenas are of sizes that are multiples of 2K, 2K-aligned,
 * and have a size close to a power of 2.
 *
 * Arenas of size >= 4K keep one chunk only.  Arenas of size 2K
 * may keep one chunk or multiple chunks.  Here are the possible
 * layouts of arenas:
 *
 *      # One chunk only, chunksize 2^k + SOMETHING - ALIGN, k >= 11
 *
 * INDEX MAGIC1 UNUSED CHUNK1
 *
 *      # Multichunk with sanity checking and chunksize 2^k-ALIGN, k>7
 *
 * INDEX MAGIC1 MAGIC2 MAGIC3 UNUSED CHUNK1 CHUNK2 CHUNK3 ...
 *
 *      # Multichunk with sanity checking and size 2^k-ALIGN, k=7
 *
 * INDEX MAGIC1 MAGIC2 MAGIC3 UNUSED CHUNK1 UNUSED CHUNK2 CHUNK3 ...
 *
 *      # Multichunk with sanity checking and size up to 80
 *
 * INDEX UNUSED MAGIC1 UNUSED MAGIC2 UNUSED ... CHUNK1 CHUNK2 CHUNK3 ...
 *
 *      # No sanity check (usually up to 48-byte-long buckets)
 * INDEX UNUSED CHUNK1 CHUNK2 ...
 *
 * Above, INDEX and MAGIC are one byte long.  Sizes of UNUSED are
 * appropriate to keep algorithms simple and memory aligned.  INDEX
 * encodes the size of the chunk, while MAGICn encodes the state (used,
 * free or non-managed-by-us-so-it-indicates-a-bug) of CHUNKn.  MAGIC
 * is used for sanity checking purposes only.  SOMETHING is 0 or 4K
 * (to make the size of the big CHUNK accommodate allocations for powers
 * of two better).
 *
 * [There is no need for alignment between chunks, since C rules ensure
 *  that structs which need 2^k alignment have a sizeof which is
 *  divisible by 2^k.  Thus as long as the last chunk is aligned at the
 *  end of the arena, and 2K-alignment does not contradict things,
 *  everything is going to be OK for sizes of chunks 2^n and 2^n +
 *  2^k.  Say, 80-byte buckets will be 16-byte aligned, and as long as we
 *  put allocations for requests in the 65..80 range there, all is fine.
 *
 *  Note, however, that standard malloc() imposes stricter
 *  requirements than the above C rules.  Moreover, our algorithms of
 *  realloc() may break this idyll, but we suppose that realloc() does
 *  not need to change alignment.]
 *
 * It is very important to make the calculation of the offset of MAGICm
 * as quick as possible, since it is done on each malloc()/free().  In
 * fact it is so quick that it has very little effect on the speed of
 * doing malloc()/free().  [By default] we forego such calculations
 * for small chunks, but only to save an extra 3% of memory, not because
 * of speed considerations.
 *
 * Here is the algorithm [which is the same for all the allocation
 * schemes above], see OV_MAGIC(block,bucket).  Let OFFSETm be the
 * offset of the CHUNKm from the start of ARENA.  Then the offset of
 * MAGICm is (OFFSETm >> SHIFT) + ADDOFFSET.  Here SHIFT and ADDOFFSET
 * are numbers which depend on the size of the chunks only.
 *
 * Let us check some sanity conditions.  The numbers OFFSETm>>SHIFT are
 * different for all the chunks in the arena if 2^SHIFT is not greater
 * than the size of the chunks in the arena.  MAGIC1 will not overwrite
 * INDEX provided ADDOFFSET is >0 if OFFSET1 < 2^SHIFT.  MAGIClast
 * will not overwrite CHUNK1 if OFFSET1 > (OFFSETlast >> SHIFT) +
 * ADDOFFSET.
 *
 * Make SHIFT the maximal possible (there is no point in making it
 * smaller).  Since OFFSETlast is 2K - CHUNKSIZE, the above restrictions
 * give restrictions on OFFSET1 and on ADDOFFSET.
 *
 * In particular, for chunks of size 2^k with k>=6 we can put
 * ADDOFFSET to be from 0 to 2^k - 2^(11-k), and have
 * OFFSET1==chunksize.  For chunks of size 80 an OFFSET1 of 2K%80=48 is
 * large enough to have ADDOFFSET between 1 and 16 (similarly for 96,
 * when ADDOFFSET should be 1).  In particular, keeping MAGICs for
 * these sizes gives no additional size penalty.
 *
 * However, for chunks of size 2^k with k<=5 this gives OFFSET1 >=
 * ADDOFFSET + 2^(11-k).  Keeping ADDOFFSET 0 allows for 2^(11-k)-2^(11-2k)
 * chunks per arena.  This is smaller than 2^(11-k) - 1 which are
 * needed if no MAGIC is kept.  [In fact, having a negative ADDOFFSET
 * would allow for slightly more buckets per arena for k=2,3.]
 *
 * Similarly, for chunks of size 3/2*2^k with k<=5 MAGICs would span
 * the area up to 2^(11-k)+ADDOFFSET.  For k=4 this gives optimal
 * ADDOFFSET as -7..0.  For k=3 ADDOFFSET can go up to 4 (with tiny
 * savings for negative ADDOFFSET).  For k=5 ADDOFFSET can go -1..16
 * (with no savings for negative values).
 *
 * In particular, keeping ADDOFFSET 0 for sizes of chunks up to 2^6
 * leads to tiny pessimizations in the case of sizes 4, 8, 12, 24, and
 * leads to no contradictions except for size=80 (or 96.)
 *
 * However, it also makes sense to keep no magic for sizes 48 or less.
 * This is what we do.  In this case one needs ADDOFFSET>=1 also for
 * chunksizes 12, 24, and 48, unless one gets one less chunk per
 * arena.
 *
 * The algorithm of OV_MAGIC(block,bucket) keeps ADDOFFSET 0 until a
 * chunksize of 64, then makes it 1.
 *
 * This allows for an additional optimization: the above scheme leads
 * to giant overheads for sizes 128 or more (one whole chunk needs to
 * be sacrificed to keep INDEX).  Instead we use chunks not of size
 * 2^k, but of size 2^k-ALIGN.  If we pack these chunks at the end of
 * the arena, then the beginnings are still in different 2^k-long
 * sections of the arena if k>=7 for ALIGN==4, and k>=8 if ALIGN==8.
 * Thus for k>7 the above algorithm of calculating the offset of the
 * magic will still give different answers for different chunks.  And to
 * avoid the overrun of MAGIC1 into INDEX, one needs an ADDOFFSET of >=1.
 * In the case k=7 we just move the first chunk an extra ALIGN
 * backward inside the ARENA (this is done once per arena lifetime,
 * thus is not a big overhead). */
#  define MAX_PACKED_POW2 6
#  define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
#  define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
#  define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
#  define TWOK_MASKED(x) (PTR2UV(x) & ~TWOK_MASK)
#  define TWOK_SHIFT(x) (PTR2UV(x) & TWOK_MASK)
#  define OV_INDEXp(block) (INT2PTR(u_char*,TWOK_MASKED(block)))
#  define OV_INDEX(block) (*OV_INDEXp(block))
#  define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) +                  \
                                    (TWOK_SHIFT(block)>>                \
                                     (bucket>>BUCKET_POW2_SHIFT)) +     \
                                    (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
    /* A bucket can have a shift smaller than its size, so we need to
       shift its magic number so it will not overwrite the index: */
#  ifdef BUCKETS_ROOT2
#    define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
#  else
#    define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
#  endif
#  define CHUNK_SHIFT 0
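/* [Worked example -- not part of the original source.]  Consider a 2K
 * arena of 64-byte chunks, i.e. bucket 6 in a hypothetical config with
 * BUCKETS_PER_POW2 == 1.  For the chunk at byte offset 256 inside the
 * arena, TWOK_SHIFT(block) is 256, so OV_MAGIC finds its MAGIC byte at
 * index (256 >> 6) + 0 == 4 from the arena start (the ADDOFFSET term is
 * 0 because bucket 6 < MIN_NEEDS_SHIFT).  Every chunk in the arena maps
 * to a distinct index, as the sanity conditions in the big comment
 * above require.
 */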
/* Number of active buckets of given ordinal. */
#ifdef IGNORE_SMALL_BAD_FREE
#  define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
#  define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK           \
                           ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE_NO_SURPLUS(bucket) \
                           : n_blks[bucket] )
#else
#  define N_BLKS(bucket) n_blks[bucket]
#endif

static const u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
  {
#  if BUCKETS_PER_POW2==1
      0, 0,
      (MIN_BUC_POW2==2 ? 384 : 0),
      224, 120, 62, 31, 16, 8, 4, 2
#  else
      0, 0, 0, 0,
      (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0),  /* 4, 4 */
      224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
#  endif
  };

/* Shift of the first bucket with the given ordinal inside 2K chunk. */
#ifdef IGNORE_SMALL_BAD_FREE
#  define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK        \
                              ? ((1<<LOG_OF_MIN_ARENA)                  \
                                 - BUCKET_SIZE_NO_SURPLUS(bucket) * N_BLKS(bucket)) \
                              : blk_shift[bucket])
#else
#  define BLK_SHIFT(bucket) blk_shift[bucket]
#endif

static const u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
  {
#  if BUCKETS_PER_POW2==1
      0, 0,
      (MIN_BUC_POW2==2 ? 512 : 0),
      256, 128, 64, 64,                 /* 8 to 64 */
      16*sizeof(union overhead),
      8*sizeof(union overhead),
      4*sizeof(union overhead),
      2*sizeof(union overhead),
#  else
      0, 0, 0, 0,
      (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
      256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
      16*sizeof(union overhead), 16*sizeof(union overhead),
      8*sizeof(union overhead), 8*sizeof(union overhead),
      4*sizeof(union overhead), 4*sizeof(union overhead),
      2*sizeof(union overhead), 2*sizeof(union overhead),
#  endif
  };
#  define NEEDED_ALIGNMENT 0x800        /* 2k boundaries */
#  define WANTED_ALIGNMENT 0x800        /* 2k boundaries */

#else  /* !PACK_MALLOC */

#  define OV_MAGIC(block,bucket) (block)->ov_magic
#  define OV_INDEX(block) (block)->ov_index
#  define CHUNK_SHIFT 1
#  define MAX_PACKED -1
#  define NEEDED_ALIGNMENT MEM_ALIGNBYTES
#  define WANTED_ALIGNMENT 0x400        /* 1k boundaries */

#endif /* !PACK_MALLOC */

#define M_OVERHEAD (sizeof(union overhead) + RMAGIC_SZ) /* overhead at start+end */

#ifdef PACK_MALLOC
#  define MEM_OVERHEAD(bucket)                                          \
    (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
#  ifdef SMALL_BUCKET_VIA_TABLE
#    define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
#    define START_SHIFT MAX_PACKED_POW2
#    ifdef BUCKETS_ROOT2                /* Chunks of size 3*2^n. */
#      define SIZE_TABLE_MAX 80
#    else
#      define SIZE_TABLE_MAX 64
#    endif
static const char bucket_of[] =
  {
#    ifdef BUCKETS_ROOT2                /* Chunks of size 3*2^n. */
      /* 0 to 15 in 4-byte increments. */
      (sizeof(void*) > 4 ? 6 : 5),      /* 4/8, 5-th bucket for better reports */
      6,                                /* 8 */
      IF_ALIGN_8(8,7), 8,               /* 16/12, 16 */
      9, 9, 10, 10,                     /* 24, 32 */
      11, 11, 11, 11,                   /* 48 */
      12, 12, 12, 12,                   /* 64 */
      13, 13, 13, 13,                   /* 80 */
      13, 13, 13, 13                    /* 80 */
#    else /* !BUCKETS_ROOT2 */
      /* 0 to 15 in 4-byte increments. */
      (sizeof(void*) > 4 ? 3 : 2),
      3,
      4, 4,
      5, 5, 5, 5,
      6, 6, 6, 6,
      6, 6, 6, 6
#    endif /* !BUCKETS_ROOT2 */
  };
#  else  /* !SMALL_BUCKET_VIA_TABLE */
#    define START_SHIFTS_BUCKET MIN_BUCKET
#    define START_SHIFT (MIN_BUC_POW2 - 1)
#  endif /* !SMALL_BUCKET_VIA_TABLE */
#else  /* !PACK_MALLOC */
#  define MEM_OVERHEAD(bucket) M_OVERHEAD
#  ifdef SMALL_BUCKET_VIA_TABLE
#    undef SMALL_BUCKET_VIA_TABLE
#  endif
#  define START_SHIFTS_BUCKET MIN_BUCKET
#  define START_SHIFT (MIN_BUC_POW2 - 1)
#endif /* !PACK_MALLOC */
/*
 * Big allocations are often of the size 2^n bytes.  To make them a
 * little bit better, make blocks of size 2^n+pagesize for big n.
 */

#ifdef TWO_POT_OPTIMIZE

#  ifndef PERL_PAGESIZE
#    define PERL_PAGESIZE 4096
#  endif
#  ifndef FIRST_BIG_POW2
#    define FIRST_BIG_POW2 15   /* 32K, 16K is used too often. */
#  endif
#  define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
/* If this value or more, check against bigger blocks. */
#  define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
/* If less than this value, goes into 2^n-overhead-block. */
#  define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)

#  define POW2_OPTIMIZE_ADJUST(nbytes)                          \
    ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
#  define POW2_OPTIMIZE_SURPLUS(bucket)                         \
    ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)

#else  /* !TWO_POT_OPTIMIZE */
#  define POW2_OPTIMIZE_ADJUST(nbytes)
#  define POW2_OPTIMIZE_SURPLUS(bucket) 0
#endif /* !TWO_POT_OPTIMIZE */
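/* [Worked example -- not part of the original source.]  With the
 * defaults above (FIRST_BIG_POW2 == 15, PERL_PAGESIZE == 4096), a
 * malloc(32768) request satisfies nbytes >= FIRST_BIG_BOUND, so
 * POW2_OPTIMIZE_ADJUST() shrinks it by one page before the bucket is
 * chosen; the request therefore still lands in the 32K bucket, whose
 * arena POW2_OPTIMIZE_SURPLUS() grows to 32K + 4096 bytes.  Without
 * this, a power-of-two request plus overhead would spill into the next,
 * twice-as-large bucket and waste almost half of the block.
 */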
#define BARK_64K_LIMIT(what,nbytes,size)

#ifndef MIN_SBRK
#  define MIN_SBRK 2048
#endif

#ifndef FIRST_SBRK
#  define FIRST_SBRK (48*1024)
#endif

/* Minimal sbrk in percents of what is already alloced. */
#ifndef MIN_SBRK_FRAC
#  define MIN_SBRK_FRAC 3
#endif

#ifndef SBRK_ALLOW_FAILURES
#  define SBRK_ALLOW_FAILURES 3
#endif

#ifndef SBRK_FAILURE_PRICE
#  define SBRK_FAILURE_PRICE 50
#endif
static void     morecore        (int bucket);
#if defined(DEBUGGING)
static void     botch           (const char *diag, const char *s, const char *file, int line);
#endif
static void     add_to_chain    (void *p, MEM_SIZE size, MEM_SIZE chip);
static void*    get_from_chain  (MEM_SIZE size);
static void*    get_from_bigger_buckets(int bucket, MEM_SIZE size);
static union overhead *getpages (MEM_SIZE needed, int *nblksp, int bucket);
static int      getpages_adjacent(MEM_SIZE require);

#ifdef I_MACH_CTHREADS
#  undef  MUTEX_LOCK
#  define MUTEX_LOCK(m)   STMT_START { if (*m) mutex_lock(*m);   } STMT_END
#  undef  MUTEX_UNLOCK
#  define MUTEX_UNLOCK(m) STMT_START { if (*m) mutex_unlock(*m); } STMT_END
#endif

#ifndef PTRSIZE
#  define PTRSIZE sizeof(void*)
#endif

#ifndef BITS_IN_PTR
#  define BITS_IN_PTR (8*PTRSIZE)
#endif
/*
 * nextf[i] is the pointer to the next free block of size 2^i.  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define NBUCKETS (BITS_IN_PTR*BUCKETS_PER_POW2 + 1)
static union overhead *nextf[NBUCKETS];

#if defined(PURIFY) && !defined(USE_PERL_SBRK)
#  define USE_PERL_SBRK
#endif

#ifdef USE_PERL_SBRK
#  define sbrk(a) Perl_sbrk(a)
Malloc_t Perl_sbrk (int size);
#else
#  ifndef HAS_SBRK_PROTO /* <unistd.h> usually takes care of this */
extern Malloc_t sbrk(int);
#  endif
#endif

#ifndef MIN_SBRK_FRAC1000       /* Backward compatibility */
#  define MIN_SBRK_FRAC1000 (MIN_SBRK_FRAC * 10)
#endif

#ifndef START_EXTERN_C
#  ifdef __cplusplus
#    define START_EXTERN_C extern "C" {
#  else
#    define START_EXTERN_C
#  endif
#endif

#ifndef END_EXTERN_C
#  ifdef __cplusplus
#    define END_EXTERN_C };
#  else
#    define END_EXTERN_C
#  endif
#endif
#include "malloc_ctl.h"

#ifndef NO_MALLOC_DYNAMIC_CFG
#  define PERL_MALLOC_OPT_CHARS "FMfAPGdac"

#  ifndef FILL_DEAD_DEFAULT
#    define FILL_DEAD_DEFAULT   1
#  endif
#  ifndef FILL_ALIVE_DEFAULT
#    define FILL_ALIVE_DEFAULT  1
#  endif
#  ifndef FILL_CHECK_DEFAULT
#    define FILL_CHECK_DEFAULT  1
#  endif

static IV MallocCfg[MallocCfg_last] = {
  FIRST_SBRK,
  MIN_SBRK,
  MIN_SBRK_FRAC,
  SBRK_ALLOW_FAILURES,
  SBRK_FAILURE_PRICE,
  SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE,     /* sbrk_goodness */
  FILL_DEAD_DEFAULT,    /* FILL_DEAD */
  FILL_ALIVE_DEFAULT,   /* FILL_ALIVE */
  FILL_CHECK_DEFAULT,   /* FILL_CHECK */
  0,                    /* MallocCfg_skip_cfg_env */
  0,                    /* MallocCfg_cfg_env_read */
  0,                    /* MallocCfg_emergency_buffer_size */
  0,                    /* MallocCfg_emergency_buffer_prepared_size */
  0                     /* MallocCfg_emergency_buffer_last_req */
};
IV *MallocCfg_ptr = MallocCfg;

static char* MallocCfgP[MallocCfg_last] = {
  0,                    /* MallocCfgP_emergency_buffer */
  0,                    /* MallocCfgP_emergency_buffer_prepared */
};
char **MallocCfgP_ptr = MallocCfgP;

#  undef MIN_SBRK
#  undef FIRST_SBRK
#  undef MIN_SBRK_FRAC1000
#  undef SBRK_ALLOW_FAILURES
#  undef SBRK_FAILURE_PRICE

#  define MIN_SBRK              MallocCfg[MallocCfg_MIN_SBRK]
#  define FIRST_SBRK            MallocCfg[MallocCfg_FIRST_SBRK]
#  define MIN_SBRK_FRAC1000     MallocCfg[MallocCfg_MIN_SBRK_FRAC1000]
#  define SBRK_ALLOW_FAILURES   MallocCfg[MallocCfg_SBRK_ALLOW_FAILURES]
#  define SBRK_FAILURE_PRICE    MallocCfg[MallocCfg_SBRK_FAILURE_PRICE]

#  define sbrk_goodness         MallocCfg[MallocCfg_sbrk_goodness]

#  define emergency_buffer_size MallocCfg[MallocCfg_emergency_buffer_size]
#  define emergency_buffer_last_req MallocCfg[MallocCfg_emergency_buffer_last_req]

#  define FILL_DEAD             MallocCfg[MallocCfg_filldead]
#  define FILL_ALIVE            MallocCfg[MallocCfg_fillalive]
#  define FILL_CHECK_CFG        MallocCfg[MallocCfg_fillcheck]
#  define FILL_CHECK            (FILL_DEAD && FILL_CHECK_CFG)

#  define emergency_buffer      MallocCfgP[MallocCfgP_emergency_buffer]
#  define emergency_buffer_prepared MallocCfgP[MallocCfgP_emergency_buffer_prepared]

#else   /* defined(NO_MALLOC_DYNAMIC_CFG) */

#  define FILL_DEAD     1
#  define FILL_ALIVE    1
#  define FILL_CHECK    1
static int sbrk_goodness = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;

#  define NO_PERL_MALLOC_ENV

#endif
#ifdef DEBUGGING_MSTATS
/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static u_int nmalloc[NBUCKETS];
static u_int sbrk_slack;
static u_int start_slack;
#else   /* !( defined DEBUGGING_MSTATS ) */
#  define sbrk_slack 0
#endif

static u_int goodsbrk;

#ifdef PERL_EMERGENCY_SBRK

#  ifndef BIG_SIZE
#    define BIG_SIZE (1<<16)    /* 64K */
#  endif

#  ifdef NO_MALLOC_DYNAMIC_CFG
static MEM_SIZE emergency_buffer_size;
        /* 0 if the last request for more memory succeeded.
           Otherwise the size of the failing request. */
static MEM_SIZE emergency_buffer_last_req;
static char *emergency_buffer;
static char *emergency_buffer_prepared;
#  endif

#  ifndef emergency_sbrk_croak
#    define emergency_sbrk_croak croak2
#  endif
static char *
perl_get_emergency_buffer(IV *size)
{
    dTHX;
    /* First offense, give a possibility to recover by dying. */
    /* No malloc involved here: */
    SV *sv;
    char *pv;
    GV **gvp = (GV**)hv_fetchs(PL_defstash, "^M", FALSE);

    if (!gvp) gvp = (GV**)hv_fetchs(PL_defstash, "\015", FALSE);
    if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
        || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD))
        return NULL;            /* Now die die die... */
    /* Got it, now detach SvPV: */
    pv = SvPV_nolen(sv);
    /* Check alignment: */
    if ((PTR2UV(pv) - sizeof(union overhead)) & (NEEDED_ALIGNMENT - 1)) {
        PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
        return NULL;            /* die die die */
    }

    SvPOK_off(sv);
    SvPV_set(sv, NULL);
    SvCUR_set(sv, 0);
    SvLEN_set(sv, 0);
    *size = malloced_size(pv) + M_OVERHEAD;
    return pv - sizeof(union overhead);
}
#  define PERL_GET_EMERGENCY_BUFFER(p) perl_get_emergency_buffer(p)
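/* [Illustrative note -- not part of the original source.]  For the
 * function above to find anything, $^M must have been preallocated from
 * Perl code, e.g. (as described in perlvar):
 *
 *      $^M = 'a' x (1 << 16);          # reserve a 64K emergency pool
 *
 * perl_get_emergency_buffer() then detaches that scalar's string buffer
 * and returns it to the allocator once sbrk() has failed.
 */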
#  ifndef NO_MALLOC_DYNAMIC_CFG
static char *
get_emergency_buffer(IV *size)
{
    char *pv = emergency_buffer_prepared;

    *size = MallocCfg[MallocCfg_emergency_buffer_prepared_size];
    emergency_buffer_prepared = 0;
    MallocCfg[MallocCfg_emergency_buffer_prepared_size] = 0;
    return pv;
}

#    define GET_EMERGENCY_BUFFER(p) get_emergency_buffer(p)
#  else  /* NO_MALLOC_DYNAMIC_CFG */
#    define GET_EMERGENCY_BUFFER(p) NULL
#  endif
static Malloc_t
emergency_sbrk(MEM_SIZE size)
{
    MEM_SIZE rsize = (((size - 1)>>LOG_OF_MIN_ARENA) + 1)<<LOG_OF_MIN_ARENA;

    if (size >= BIG_SIZE
        && (!emergency_buffer_last_req ||
            (size < (MEM_SIZE)emergency_buffer_last_req))) {
        /* Give the possibility to recover, but avoid an infinite cycle. */
        MALLOC_UNLOCK;
        emergency_buffer_last_req = size;
        emergency_sbrk_croak("Out of memory during \"large\" request for %" UVuf
                             " bytes, total sbrk() is %" UVuf " bytes",
                             (UV)size, (UV)(goodsbrk + sbrk_slack));
    }

    if ((MEM_SIZE)emergency_buffer_size >= rsize) {
        char *old = emergency_buffer;

        emergency_buffer_size -= rsize;
        emergency_buffer += rsize;
        return old;
    } else {
        /* First offense, give a possibility to recover by dying. */
        /* No malloc involved here: */
        IV Size;
        char *pv = GET_EMERGENCY_BUFFER(&Size);
        int have = 0;

        if (emergency_buffer_size) {
            add_to_chain(emergency_buffer, emergency_buffer_size, 0);
            emergency_buffer_size = 0;
            emergency_buffer = NULL;
            have = 1;
        }

        if (!pv)
            pv = PERL_GET_EMERGENCY_BUFFER(&Size);
        if (!pv) {
            if (have)
                goto do_croak;
            return (char *)-1;          /* Now die die die... */
        }

        /* Check alignment: */
        if (PTR2UV(pv) & (NEEDED_ALIGNMENT - 1)) {
            dTHX;

            PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
            return (char *)-1;          /* die die die */
        }

        emergency_buffer = pv;
        emergency_buffer_size = Size;
    }
  do_croak:
    MALLOC_UNLOCK;
    emergency_sbrk_croak("Out of memory during request for %" UVuf
                         " bytes, total sbrk() is %" UVuf " bytes",
                         (UV)size, (UV)(goodsbrk + sbrk_slack));
    NOT_REACHED; /* NOTREACHED */
    return NULL;
}

#else /* !defined(PERL_EMERGENCY_SBRK) */
#  define emergency_sbrk(size) -1
#endif  /* defined PERL_EMERGENCY_SBRK */
/* Don't use PerlIO buffered writes as they allocate memory. */
#define MYMALLOC_WRITE2STDERR(s) PERL_UNUSED_RESULT(PerlLIO_write(PerlIO_fileno(PerlIO_stderr()),s,strlen(s)))

#ifdef DEBUGGING
#undef ASSERT
#define ASSERT(p,diag)   if (!(p)) botch(diag,STRINGIFY(p),__FILE__,__LINE__);

static void
botch(const char *diag, const char *s, const char *file, int line)
{
    dVAR;
    dTHX;
    if (!(PERL_MAYBE_ALIVE && PERL_GET_THX))
        goto do_write;
    else {
        if (PerlIO_printf(PerlIO_stderr(),
                          "assertion botched (%s?): %s %s:%d\n",
                          diag, s, file, line) != 0) {
         do_write:              /* Can be initializing interpreter */
            MYMALLOC_WRITE2STDERR("assertion botched (");
            MYMALLOC_WRITE2STDERR(diag);
            MYMALLOC_WRITE2STDERR("?): ");
            MYMALLOC_WRITE2STDERR(s);
            MYMALLOC_WRITE2STDERR(" (");
            MYMALLOC_WRITE2STDERR(file);
            MYMALLOC_WRITE2STDERR(":");
            {
                char linebuf[10];
                char *s = linebuf + sizeof(linebuf) - 1;
                int n = line;
                *s = 0;
                do {
                    *--s = '0' + (n % 10);
                } while (n /= 10);
                MYMALLOC_WRITE2STDERR(s);
            }
            MYMALLOC_WRITE2STDERR(")\n");
        }
        PerlProc_abort();
    }
}
#else
#define ASSERT(p, diag)
#endif
#ifdef MALLOC_FILL
/* Fill should be long enough to cover long */
static void
fill_pat_4bytes(unsigned char *s, size_t nbytes, const unsigned char *fill)
{
    unsigned char *e = s + nbytes;
    long *lp;
    const long lfill = *(long*)fill;

    if (PTR2UV(s) & (sizeof(long)-1)) {         /* Align the pattern */
        int shift = sizeof(long) - (PTR2UV(s) & (sizeof(long)-1));
        unsigned const char *f = fill + sizeof(long) - shift;
        unsigned char *e1 = s + shift;

        while (s < e1)
            *s++ = *f++;
    }
    lp = (long*)s;
    while ((unsigned char*)(lp + 1) <= e)
        *lp++ = lfill;
    s = (unsigned char*)lp;
    while (s < e)
        *s++ = *fill++;
}
/* Just malloc()ed */
static const unsigned char fill_feedadad[] =
 {0xFE, 0xED, 0xAD, 0xAD, 0xFE, 0xED, 0xAD, 0xAD,
  0xFE, 0xED, 0xAD, 0xAD, 0xFE, 0xED, 0xAD, 0xAD};
/* Just free()ed */
static const unsigned char fill_deadbeef[] =
 {0xDE, 0xAD, 0xBE, 0xEF, 0xDE, 0xAD, 0xBE, 0xEF,
  0xDE, 0xAD, 0xBE, 0xEF, 0xDE, 0xAD, 0xBE, 0xEF};
#  define FILL_DEADBEEF(s, n)   \
        (void)(FILL_DEAD?  (fill_pat_4bytes((s), (n), fill_deadbeef), 0) : 0)
#  define FILL_FEEDADAD(s, n)   \
        (void)(FILL_ALIVE? (fill_pat_4bytes((s), (n), fill_feedadad), 0) : 0)
#else
#  define FILL_DEADBEEF(s, n)   ((void)0)
#  define FILL_FEEDADAD(s, n)   ((void)0)
#  undef MALLOC_FILL_CHECK
#endif
#ifdef MALLOC_FILL_CHECK
static int
cmp_pat_4bytes(unsigned char *s, size_t nbytes, const unsigned char *fill)
{
    unsigned char *e = s + nbytes;
    long *lp;
    const long lfill = *(long*)fill;

    if (PTR2UV(s) & (sizeof(long)-1)) {         /* Align the pattern */
        int shift = sizeof(long) - (PTR2UV(s) & (sizeof(long)-1));
        unsigned const char *f = fill + sizeof(long) - shift;
        unsigned char *e1 = s + shift;

        while (s < e1)
            if (*s++ != *f++)
                return 1;
    }
    lp = (long*)s;
    while ((unsigned char*)(lp + 1) <= e)
        if (*lp++ != lfill)
            return 1;
    s = (unsigned char*)lp;
    while (s < e)
        if (*s++ != *fill++)
            return 1;
    return 0;
}
#  define FILLCHECK_DEADBEEF(s, n)                                      \
        ASSERT(!FILL_CHECK || !cmp_pat_4bytes(s, n, fill_deadbeef),     \
               "free()ed/realloc()ed-away memory was overwritten")
#else
#  define FILLCHECK_DEADBEEF(s, n)      ((void)0)
#endif
STATIC int
S_adjust_size_and_find_bucket(size_t *nbytes_p)
{
    MEM_SIZE shiftr;
    int bucket;
    size_t nbytes;

    PERL_ARGS_ASSERT_ADJUST_SIZE_AND_FIND_BUCKET;

    nbytes = *nbytes_p;

    /*
     * Convert amount of memory requested into
     * closest block size stored in hash buckets
     * which satisfies request.  Account for
     * space used per block for accounting.
     */
#ifdef PACK_MALLOC
#  ifdef SMALL_BUCKET_VIA_TABLE
    if (nbytes == 0)
        bucket = MIN_BUCKET;
    else if (nbytes <= SIZE_TABLE_MAX) {
        bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
    } else
#  else
    if (nbytes == 0)
        nbytes = 1;
    if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
    else
#  endif
#endif
    {
        POW2_OPTIMIZE_ADJUST(nbytes);
        nbytes += M_OVERHEAD;
        nbytes = (nbytes + 3) &~ 3;
#if defined(PACK_MALLOC) && !defined(SMALL_BUCKET_VIA_TABLE)
      do_shifts:
#endif
        shiftr = (nbytes - 1) >> START_SHIFT;
        bucket = START_SHIFTS_BUCKET;
        /* apart from this loop, this is O(1) */
        while (shiftr >>= 1)
            bucket += BUCKETS_PER_POW2;
    }
    *nbytes_p = nbytes;
    return bucket;
}
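/* [Illustrative sketch -- not part of the original source.]  The helper
 * above both rounds the request up and reports the bucket, so a caller
 * uses it like this (cf. Perl_malloc() below):
 */
#if 0
static int
example_bucket_lookup(void)
{
    size_t nbytes = 100;        /* caller's request */
    int bucket = S_adjust_size_and_find_bucket(&nbytes);
    /* nbytes now includes M_OVERHEAD plus rounding; the chosen bucket
       satisfies BUCKET_SIZE_REAL(bucket) >= the original request. */
    return bucket;
}
#endif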
Malloc_t
Perl_malloc(size_t nbytes)
{
    dVAR;
    union overhead *p;
    int bucket;

#if defined(DEBUGGING) || defined(RCHECK)
    MEM_SIZE size = nbytes;
#endif

    BARK_64K_LIMIT("Allocation",nbytes,nbytes);
#ifdef DEBUGGING
    if ((long)nbytes < 0)
        croak("%s", "panic: malloc");
#endif

    bucket = adjust_size_and_find_bucket(&nbytes);
    MALLOC_LOCK;
    /*
     * If nothing in hash bucket right now,
     * request more memory from the system.
     */
    if (nextf[bucket] == NULL)
        morecore(bucket);
    if ((p = nextf[bucket]) == NULL) {
        MALLOC_UNLOCK;
        {
            dTHX;
            if (!PL_nomemok) {
#if defined(PLAIN_MALLOC) && defined(NO_FANCY_MALLOC)
                MYMALLOC_WRITE2STDERR("Out of memory!\n");
#else
                char buff[80];
                char *eb = buff + sizeof(buff) - 1;
                char *s = eb;
                size_t n = nbytes;

                MYMALLOC_WRITE2STDERR("Out of memory during request for ");
#if defined(DEBUGGING) || defined(RCHECK)
                n = size;
#endif
                *s = 0;
                do {
                    *--s = '0' + (n % 10);
                } while (n /= 10);
                MYMALLOC_WRITE2STDERR(s);
                MYMALLOC_WRITE2STDERR(" bytes, total sbrk() is ");
                s = eb;
                n = goodsbrk + sbrk_slack;
                do {
                    *--s = '0' + (n % 10);
                } while (n /= 10);
                MYMALLOC_WRITE2STDERR(s);
                MYMALLOC_WRITE2STDERR(" bytes!\n");
#endif /* defined(PLAIN_MALLOC) && defined(NO_FANCY_MALLOC) */
                my_exit(1);
            }
        }
        return (NULL);
    }

    /* remove from linked list */
#ifdef DEBUGGING
    if ( (PTR2UV(p) & (MEM_ALIGNBYTES - 1))
         /* Can't get this low */
         || (p && PTR2UV(p) < (1<<LOG_OF_MIN_ARENA)) ) {
        dTHX;
        PerlIO_printf(PerlIO_stderr(),
                      "Unaligned pointer in the free chain 0x%" UVxf "\n",
                      PTR2UV(p));
    }
    if ( (PTR2UV(p->ov_next) & (MEM_ALIGNBYTES - 1))
         || (p->ov_next && PTR2UV(p->ov_next) < (1<<LOG_OF_MIN_ARENA)) ) {
        dTHX;
        PerlIO_printf(PerlIO_stderr(),
                      "Unaligned \"next\" pointer in the free "
                      "chain 0x%" UVxf " at 0x%" UVxf "\n",
                      PTR2UV(p->ov_next), PTR2UV(p));
    }
#endif
    nextf[bucket] = p->ov_next;

    MALLOC_UNLOCK;

    DEBUG_m(PerlIO_printf(Perl_debug_log,
                          "0x%" UVxf ": (%05lu) malloc %ld bytes\n",
                          PTR2UV((Malloc_t)(p + CHUNK_SHIFT)), (unsigned long)(PL_an++),
                          (long)size));

    FILLCHECK_DEADBEEF((unsigned char*)(p + CHUNK_SHIFT),
                       BUCKET_SIZE_REAL(bucket) + RMAGIC_SZ);

#ifdef IGNORE_SMALL_BAD_FREE
    if (bucket >= FIRST_BUCKET_WITH_CHECK)
#endif
        OV_MAGIC(p, bucket) = MAGIC;
#ifndef PACK_MALLOC
    OV_INDEX(p) = bucket;
#endif
#ifdef RCHECK
    /*
     * Record allocated size of block and
     * bound space with magic numbers.
     */
    p->ov_rmagic = RMAGIC;
    if (bucket <= MAX_SHORT_BUCKET) {
        int i;

        nbytes = size + M_OVERHEAD;
        p->ov_size = nbytes - 1;
        if ((i = nbytes & (RMAGIC_SZ-1))) {
            i = RMAGIC_SZ - i;
            while (i--) /* nbytes - RMAGIC_SZ is end of alloced area */
                ((caddr_t)p + nbytes - RMAGIC_SZ)[i] = RMAGIC_C;
        }
        /* Same at RMAGIC_SZ-aligned RMAGIC */
        nbytes = (nbytes + RMAGIC_SZ - 1) & ~(RMAGIC_SZ - 1);
        ((u_int *)((caddr_t)p + nbytes))[-1] = RMAGIC;
    }
    FILL_FEEDADAD((unsigned char *)(p + CHUNK_SHIFT), size);
#endif
    return ((Malloc_t)(p + CHUNK_SHIFT));
}
static char *last_sbrk_top;
static char *last_op;                   /* This arena can be easily extended. */
static MEM_SIZE sbrked_remains;

#ifdef DEBUGGING_MSTATS
static int sbrks;
#endif

struct chunk_chain_s {
    struct chunk_chain_s *next;
    MEM_SIZE size;
};
static struct chunk_chain_s *chunk_chain;
static int n_chunks;
static char max_bucket;
/* Cut off a piece of one of the chunks in the chain.  Prefer a smaller chunk. */
static void *
get_from_chain(MEM_SIZE size)
{
    struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
    struct chunk_chain_s **oldgoodp = NULL;
    long min_remain = LONG_MAX;

    while (elt) {
        if (elt->size >= size) {
            long remains = elt->size - size;
            if (remains >= 0 && remains < min_remain) {
                oldgoodp = oldp;
                min_remain = remains;
            }
            if (remains == 0) {
                break;
            }
        }
        oldp = &( elt->next );
        elt = elt->next;
    }
    if (!oldgoodp) return NULL;
    if (min_remain) {
        void *ret = *oldgoodp;
        struct chunk_chain_s *next = (*oldgoodp)->next;

        *oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
        (*oldgoodp)->size = min_remain;
        (*oldgoodp)->next = next;
        return ret;
    } else {
        void *ret = *oldgoodp;
        *oldgoodp = (*oldgoodp)->next;
        n_chunks--;
        return ret;
    }
}

static void
add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
{
    struct chunk_chain_s *next = chunk_chain;
    char *cp = (char*)p;

    cp += chip;
    chunk_chain = (struct chunk_chain_s *)cp;
    chunk_chain->size = size - chip;
    chunk_chain->next = next;
    n_chunks++;
}
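/* [Illustrative note -- not part of the original source.]  add_to_chain()
 * stores the struct chunk_chain_s header inside the free chunk itself
 * (after skipping the first `chip' bytes), so the chain costs no extra
 * memory; get_from_chain() above then walks it best-fit, preferring the
 * candidate that leaves the smallest remainder.
 */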
static void *
get_from_bigger_buckets(int bucket, MEM_SIZE size)
{
    int price = 1;
    static int bucketprice[NBUCKETS];
    while (bucket <= max_bucket) {
        /* We postpone stealing from bigger buckets until we want it
           often enough. */
        if (nextf[bucket] && bucketprice[bucket]++ >= price) {
            /* Steal it! */
            void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
            bucketprice[bucket] = 0;
            if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
                last_op = NULL;         /* Disable optimization */
            }
            nextf[bucket] = nextf[bucket]->ov_next;
#ifdef DEBUGGING_MSTATS
            nmalloc[bucket]--;
            start_slack -= M_OVERHEAD;
#endif
            add_to_chain(ret, (BUCKET_SIZE_NO_SURPLUS(bucket) +
                               POW2_OPTIMIZE_SURPLUS(bucket)),
                         size);
            return ret;
        }
        bucket++;
    }
    return NULL;
}
static union overhead *
getpages(MEM_SIZE needed, int *nblksp, int bucket)
{
    dVAR;
    /* Need to do (possibly expensive) system call.  Try to
       optimize it for rare calling. */
    MEM_SIZE require = needed - sbrked_remains;
    char *cp;
    union overhead *ovp;
    MEM_SIZE slack = 0;

    if (sbrk_goodness > 0) {
        if (!last_sbrk_top && require < (MEM_SIZE)FIRST_SBRK)
            require = FIRST_SBRK;
        else if (require < (MEM_SIZE)MIN_SBRK) require = MIN_SBRK;

        if (require < (Size_t)(goodsbrk * MIN_SBRK_FRAC1000 / 1000))
            require = goodsbrk * MIN_SBRK_FRAC1000 / 1000;
        require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
    } else {
        require = needed;
        last_sbrk_top = 0;
        sbrked_remains = 0;
    }

    DEBUG_m(PerlIO_printf(Perl_debug_log,
                          "sbrk(%ld) for %ld-byte-long arena\n",
                          (long)require, (long) needed));
    cp = (char *)sbrk(require);
#ifdef DEBUGGING_MSTATS
    sbrks++;
#endif
    if (cp == last_sbrk_top) {
        /* Common case, anything is fine. */
        sbrk_goodness++;
        ovp = (union overhead *) (cp - sbrked_remains);
        last_op = cp - sbrked_remains;
        sbrked_remains = require - (needed - sbrked_remains);
    } else if (cp == (char *)-1) { /* no more room! */
        ovp = (union overhead *)emergency_sbrk(needed);
        if (ovp == (union overhead *)-1)
            return 0;
        if (((char*)ovp) > last_op) {   /* Cannot happen with current emergency_sbrk() */
            last_op = 0;
        }
        return ovp;
    } else {                    /* Non-continuous or first sbrk(). */
        long add = sbrked_remains;
        char *newcp;

        if (sbrked_remains) {   /* Put rest into chain, we
                                   cannot use it right now. */
            add_to_chain((void*)(last_sbrk_top - sbrked_remains),
                         sbrked_remains, 0);
        }

        /* Second, check alignment. */
        slack = 0;

        /* WANTED_ALIGNMENT may be more than NEEDED_ALIGNMENT, but this may
           improve performance of memory access. */
        if (PTR2UV(cp) & (WANTED_ALIGNMENT - 1)) { /* Not aligned. */
            slack = WANTED_ALIGNMENT - (PTR2UV(cp) & (WANTED_ALIGNMENT - 1));
            add += slack;
        }

        if (add) {
            DEBUG_m(PerlIO_printf(Perl_debug_log,
                                  "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignment,\t%ld were assumed to come from the tail of the previous sbrk\n",
                                  (long)add, (long) slack,
                                  (long) sbrked_remains));
            newcp = (char *)sbrk(add);
#if defined(DEBUGGING_MSTATS)
            sbrks++;
            sbrk_slack += add;
#endif
            if (newcp != cp + require) {
                /* Too bad: even rounding sbrk() is not continuous.*/
                DEBUG_m(PerlIO_printf(Perl_debug_log,
                                      "failed to fix bad sbrk()\n"));
#ifdef PACK_MALLOC
                if (slack) {
                    MALLOC_UNLOCK;
                    fatalcroak("panic: Off-page sbrk\n");
                }
#endif
                if (sbrked_remains) {
                    /* Try again. */
#if defined(DEBUGGING_MSTATS)
                    sbrk_slack += require;
#endif
                    require = needed;
                    DEBUG_m(PerlIO_printf(Perl_debug_log,
                                          "straight sbrk(%ld)\n",
                                          (long)require));
                    cp = (char *)sbrk(require);
#ifdef DEBUGGING_MSTATS
                    sbrks++;
#endif
                    if (cp == (char *)-1)
                        return 0;
                }
                sbrk_goodness = -1;     /* Disable optimization!
                                           Continue with not-aligned... */
            } else {
                cp += slack;
                require += sbrked_remains;
            }
        }

        if (last_sbrk_top) {
            sbrk_goodness -= SBRK_FAILURE_PRICE;
        }

        ovp = (union overhead *) cp;
        /*
         * Round up to minimum allocation size boundary
         * and deduct from block count to reflect.
         */

#  if NEEDED_ALIGNMENT > MEM_ALIGNBYTES
        if (PTR2UV(ovp) & (NEEDED_ALIGNMENT - 1))
            fatalcroak("Misalignment of sbrk()\n");
        else
#  endif
        if (PTR2UV(ovp) & (MEM_ALIGNBYTES - 1)) {
            DEBUG_m(PerlIO_printf(Perl_debug_log,
                                  "fixing sbrk(): %d bytes off machine alignment\n",
                                  (int)(PTR2UV(ovp) & (MEM_ALIGNBYTES - 1))));
            ovp = INT2PTR(union overhead *,(PTR2UV(ovp) + MEM_ALIGNBYTES) &
                          ~(MEM_ALIGNBYTES - 1));  /* round *up* to alignment */
            (*nblksp)--;
#  if defined(DEBUGGING_MSTATS)
            /* This is only approx. if TWO_POT_OPTIMIZE: */
            sbrk_slack += (1 << (bucket >> BUCKET_POW2_SHIFT));
#  endif
        }
        ;       /* Finish "else" */
        sbrked_remains = require - needed;
        last_op = cp;
    }
#if !defined(PLAIN_MALLOC) && !defined(NO_FANCY_MALLOC)
    emergency_buffer_last_req = 0;
#endif
    last_sbrk_top = cp + require;
#ifdef DEBUGGING_MSTATS
    goodsbrk += require;
#endif
    return ovp;
}
1618 static int
1619 getpages_adjacent(MEM_SIZE require)
1620 {
1621 if (require <= sbrked_remains) {
1622 sbrked_remains -= require;
1623 } else {
1624 char *cp;
1626 require -= sbrked_remains;
1627 /* We do not try to optimize sbrks here, we go for place. */
1628 cp = (char*) sbrk(require);
1629 #ifdef DEBUGGING_MSTATS
1630 sbrks++;
1631 goodsbrk += require;
1632 #endif
1633 if (cp == last_sbrk_top) {
1634 sbrked_remains = 0;
1635 last_sbrk_top = cp + require;
1636 } else {
1637 if (cp == (char*)-1) { /* Out of memory */
1638 #ifdef DEBUGGING_MSTATS
1639 goodsbrk -= require;
1640 #endif
1641 return 0;
1642 }
1643 /* Report the failure: */
1644 if (sbrked_remains)
1645 add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1646 sbrked_remains, 0);
1647 add_to_chain((void*)cp, require, 0);
1648 sbrk_goodness -= SBRK_FAILURE_PRICE;
1649 sbrked_remains = 0;
1650 last_sbrk_top = 0;
1651 last_op = 0;
1652 return 0;
1653 }
1654 }
1656 return 1;
1657 }
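/*
 * Illustrative use of getpages_adjacent() (editor's sketch, mirroring
 * the call in Perl_realloc() below): it returns 1 iff `require' more
 * bytes could be attached contiguously after last_sbrk_top.
 *
 *	if (cp - M_OVERHEAD == last_op		// our chunk is the last one
 *	    && getpages_adjacent(require)) {
 *	    // the chunk now extends by `require' bytes in place
 *	}
 */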
1659 /*
1660 * Allocate more memory to the indicated bucket.
1661 */
1662 static void
1663 morecore(int bucket)
1664 {
1665 dVAR;
1666 union overhead *ovp;
1667 int rnu; /* 2^rnu bytes will be requested */
1668 int nblks; /* become nblks blocks of the desired size */
1669 MEM_SIZE siz, needed;
1670 static int were_called = 0;
1672 if (nextf[bucket])
1673 return;
1674 #ifndef NO_PERL_MALLOC_ENV
1675 if (!were_called) {
1676 /* It's our first time. Initialize ourselves */
1677 were_called = 1; /* Avoid a loop */
1678 if (!MallocCfg[MallocCfg_skip_cfg_env]) {
1679 char *s = getenv("PERL_MALLOC_OPT"), *t = s, *off;
1680 const char *opts = PERL_MALLOC_OPT_CHARS;
1681 int changed = 0;
1683 while ( t && t[0] && t[1] == '='
1684 && ((off = strchr(opts, *t))) ) {
1685 IV val = 0;
1687 t += 2;
1688 while (*t <= '9' && *t >= '0')
1689 val = 10*val + *t++ - '0';
1690 if (!*t || *t == ';') {
1691 if (MallocCfg[off - opts] != val)
1692 changed = 1;
1693 MallocCfg[off - opts] = val;
1694 if (*t)
1695 t++;
1696 }
1697 }
1698 if (t && *t) {
1699 dTHX;
1700 MYMALLOC_WRITE2STDERR("Unrecognized part of PERL_MALLOC_OPT: \"");
1701 MYMALLOC_WRITE2STDERR(t);
1702 MYMALLOC_WRITE2STDERR("\"\n");
1703 }
1704 if (changed)
1705 MallocCfg[MallocCfg_cfg_env_read] = 1;
1706 }
1707 }
1708 #endif
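/*
 * Editor's note: the loop above parses PERL_MALLOC_OPT as a
 * semicolon-separated list of single-letter options with decimal
 * values, each letter taken from PERL_MALLOC_OPT_CHARS.  Hypothetical
 * shell usage (the option letters available depend on the build):
 *
 *	PERL_MALLOC_OPT='X=1;Y=4096' perl script.pl
 */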
1709 if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
1710 MALLOC_UNLOCK;
1711 croak("%s", "Out of memory during ridiculously large request");
1712 }
1713 if (bucket > max_bucket)
1714 max_bucket = bucket;
1716 rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
1717 ? LOG_OF_MIN_ARENA
1718 : (bucket >> BUCKET_POW2_SHIFT) );
1719 /* This may be overwritten later: */
1720 nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
1721 needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
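/*
 * Worked example (editor's, assuming LOG_OF_MIN_ARENA == 11 and no
 * root-2 buckets, i.e. BUCKET_POW2_SHIFT == 0): for the 64-byte bucket
 * (index 6), rnu == 11, so nblks == 1 << (11 - 6) == 32 blocks are
 * carved out of one needed == 2048-byte arena.
 */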
1722 if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* The arena-sized bucket has a free chunk. */
1723 ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
1724 nextf[rnu << BUCKET_POW2_SHIFT]
1725 = nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
1726 #ifdef DEBUGGING_MSTATS
1727 nmalloc[rnu << BUCKET_POW2_SHIFT]--;
1728 start_slack -= M_OVERHEAD;
1729 #endif
1730 DEBUG_m(PerlIO_printf(Perl_debug_log,
1731 "stealing %ld bytes from %ld arena\n",
1732 (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
1733 } else if (chunk_chain
1734 && (ovp = (union overhead*) get_from_chain(needed))) {
1735 DEBUG_m(PerlIO_printf(Perl_debug_log,
1736 "stealing %ld bytes from chain\n",
1737 (long) needed));
1738 } else if ( (ovp = (union overhead*)
1739 get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
1740 needed)) ) {
1741 DEBUG_m(PerlIO_printf(Perl_debug_log,
1742 "stealing %ld bytes from bigger buckets\n",
1743 (long) needed));
1744 } else if (needed <= sbrked_remains) {
1745 ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
1746 sbrked_remains -= needed;
1747 last_op = (char*)ovp;
1748 } else
1749 ovp = getpages(needed, &nblks, bucket);
1751 if (!ovp)
1752 return;
1753 FILL_DEADBEEF((unsigned char*)ovp, needed);
1755 /*
1756 * Add new memory allocated to that on
1757 * free list for this hash bucket.
1758 */
1759 siz = BUCKET_SIZE_NO_SURPLUS(bucket); /* No surplus if nblks > 1 */
1760 #ifdef PACK_MALLOC
1761 *(u_char*)ovp = bucket; /* Fill index. */
1762 if (bucket <= MAX_PACKED) {
1763 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
1764 nblks = N_BLKS(bucket);
1765 # ifdef DEBUGGING_MSTATS
1766 start_slack += BLK_SHIFT(bucket);
1767 # endif
1768 } else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
1769 ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
1770 siz -= sizeof(union overhead);
1771 } else ovp++; /* One chunk per block. */
1772 #endif /* PACK_MALLOC */
1773 nextf[bucket] = ovp;
1774 #ifdef DEBUGGING_MSTATS
1775 nmalloc[bucket] += nblks;
1776 if (bucket > MAX_PACKED) {
1777 start_slack += M_OVERHEAD * nblks;
1778 }
1779 #endif
1781 while (--nblks > 0) {
1782 ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
1783 ovp = (union overhead *)((caddr_t)ovp + siz);
1784 }
1785 /* Not all sbrks return zeroed memory.*/
1786 ovp->ov_next = (union overhead *)NULL;
1787 #ifdef PACK_MALLOC
1788 if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
1789 union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
1790 nextf[7*BUCKETS_PER_POW2] =
1791 (union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
1792 - sizeof(union overhead));
1793 nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
1794 }
1795 #endif /* PACK_MALLOC */
1796 }
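/*
 * The `while (--nblks > 0)' loop above threads the fresh arena into a
 * NULL-terminated singly-linked free list.  Editor's sketch of the same
 * technique on a plain buffer (illustrative, not allocator code;
 * assumes n >= 1):
 *
 *	struct link { struct link *next; };
 *	static void thread_blocks(char *p, size_t blksz, int n)
 *	{
 *	    int i;
 *	    for (i = 0; i < n - 1; i++)
 *		((struct link *)(p + i * blksz))->next =
 *		    (struct link *)(p + (i + 1) * blksz);
 *	    ((struct link *)(p + (n - 1) * blksz))->next = NULL;
 *	}
 */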
1798 Free_t
1799 Perl_mfree(Malloc_t where)
1800 {
1801 dVAR;
1802 MEM_SIZE size;
1803 union overhead *ovp;
1804 char *cp = (char*)where;
1805 #ifdef PACK_MALLOC
1806 u_char bucket;
1807 #endif
1809 DEBUG_m(PerlIO_printf(Perl_debug_log,
1810 "0x%" UVxf ": (%05lu) free\n",
1811 PTR2UV(cp), (unsigned long)(PL_an++)));
1813 if (cp == NULL)
1814 return;
1815 #ifdef DEBUGGING
1816 if (PTR2UV(cp) & (MEM_ALIGNBYTES - 1))
1817 croak("%s", "wrong alignment in free()");
1818 #endif
1819 ovp = (union overhead *)((caddr_t)cp
1820 - sizeof (union overhead) * CHUNK_SHIFT);
1821 #ifdef PACK_MALLOC
1822 bucket = OV_INDEX(ovp);
1823 #endif
1824 #ifdef IGNORE_SMALL_BAD_FREE
1825 if ((bucket >= FIRST_BUCKET_WITH_CHECK)
1826 && (OV_MAGIC(ovp, bucket) != MAGIC))
1827 #else
1828 if (OV_MAGIC(ovp, bucket) != MAGIC)
1829 #endif
1830 {
1831 static int bad_free_warn = -1;
1832 if (bad_free_warn == -1) {
1833 dTHX;
1834 char *pbf = PerlEnv_getenv("PERL_BADFREE");
1835 bad_free_warn = (pbf) ? strNE("0", pbf) : 1;
1836 }
1837 if (!bad_free_warn)
1838 return;
1839 #ifdef RCHECK
1840 {
1841 dTHX;
1842 if (!PERL_IS_ALIVE || !PL_curcop)
1843 Perl_ck_warner_d(aTHX_ packWARN(WARN_MALLOC), "%s free() ignored (RMAGIC, PERL_CORE)",
1844 ovp->ov_rmagic == RMAGIC - 1 ?
1845 "Duplicate" : "Bad");
1846 }
1847 #else
1848 {
1849 dTHX;
1850 if (!PERL_IS_ALIVE || !PL_curcop)
1851 Perl_ck_warner_d(aTHX_ packWARN(WARN_MALLOC), "%s", "Bad free() ignored (PERL_CORE)");
1852 }
1853 #endif
1854 return; /* sanity */
1855 }
1856 #ifdef RCHECK
1857 ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite");
1858 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
1859 int i;
1860 MEM_SIZE nbytes = ovp->ov_size + 1;
1862 if ((i = nbytes & (RMAGIC_SZ-1))) {
1863 i = RMAGIC_SZ - i;
1864 while (i--) { /* nbytes - RMAGIC_SZ is end of alloced area */
1865 ASSERT(((caddr_t)ovp + nbytes - RMAGIC_SZ)[i] == RMAGIC_C,
1866 "chunk's tail overwrite");
1867 }
1868 }
1869 /* Same at RMAGIC_SZ-aligned RMAGIC */
1870 nbytes = (nbytes + (RMAGIC_SZ-1)) & ~(RMAGIC_SZ-1);
1871 ASSERT(((u_int *)((caddr_t)ovp + nbytes))[-1] == RMAGIC,
1872 "chunk's tail overwrite");
1873 FILLCHECK_DEADBEEF((unsigned char*)((caddr_t)ovp + nbytes),
1874 BUCKET_SIZE(OV_INDEX(ovp)) - nbytes);
1875 }
1876 FILL_DEADBEEF((unsigned char*)(ovp+CHUNK_SHIFT),
1877 BUCKET_SIZE_REAL(OV_INDEX(ovp)) + RMAGIC_SZ);
1878 ovp->ov_rmagic = RMAGIC - 1;
1879 #endif
1880 ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite");
1881 size = OV_INDEX(ovp);
1883 MALLOC_LOCK;
1884 ovp->ov_next = nextf[size];
1885 nextf[size] = ovp;
1886 MALLOC_UNLOCK;
1887 }
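/*
 * Editor's note: the fast path of Perl_mfree() is O(1) - after the
 * magic/overwrite checks, the chunk is simply pushed on the head of its
 * bucket's free list under the lock:
 *
 *	MALLOC_LOCK;
 *	ovp->ov_next = nextf[bucket];
 *	nextf[bucket] = ovp;
 *	MALLOC_UNLOCK;
 */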
1889 /* There is no need to do any locking in realloc (with the exception of
1890 trying to grow in place if we are at the end of the chain).
1891 If somebody calls us from a different thread with the same address,
1892 that is the caller's race anyway. */
1894 Malloc_t
1895 Perl_realloc(void *mp, size_t nbytes)
1896 {
1897 dVAR;
1898 MEM_SIZE onb;
1899 union overhead *ovp;
1900 char *res;
1901 int prev_bucket;
1902 int bucket;
1903 int incr; /* 1 if the request does not fit, -1 if it "easily" fits in a
1904 smaller bucket, otherwise 0. */
1905 char *cp = (char*)mp;
1907 #ifdef DEBUGGING
1908 MEM_SIZE size = nbytes;
1910 if ((long)nbytes < 0)
1911 croak("%s", "panic: realloc");
1912 #endif
1914 BARK_64K_LIMIT("Reallocation",nbytes,size);
1915 if (!cp)
1916 return Perl_malloc(nbytes);
1918 ovp = (union overhead *)((caddr_t)cp
1919 - sizeof (union overhead) * CHUNK_SHIFT);
1920 bucket = OV_INDEX(ovp);
1922 #ifdef IGNORE_SMALL_BAD_FREE
1923 if ((bucket >= FIRST_BUCKET_WITH_CHECK)
1924 && (OV_MAGIC(ovp, bucket) != MAGIC))
1925 #else
1926 if (OV_MAGIC(ovp, bucket) != MAGIC)
1927 #endif
1928 {
1929 static int bad_free_warn = -1;
1930 if (bad_free_warn == -1) {
1931 dTHX;
1932 char *pbf = PerlEnv_getenv("PERL_BADFREE");
1933 bad_free_warn = (pbf) ? strNE("0", pbf) : 1;
1934 }
1935 if (!bad_free_warn)
1936 return NULL;
1937 #ifdef RCHECK
1938 {
1939 dTHX;
1940 if (!PERL_IS_ALIVE || !PL_curcop)
1941 Perl_ck_warner_d(aTHX_ packWARN(WARN_MALLOC), "%srealloc() %signored",
1942 (ovp->ov_rmagic == RMAGIC - 1 ? "" : "Bad "),
1943 ovp->ov_rmagic == RMAGIC - 1
1944 ? "of freed memory " : "");
1945 }
1946 #else
1947 {
1948 dTHX;
1949 if (!PERL_IS_ALIVE || !PL_curcop)
1950 Perl_ck_warner_d(aTHX_ packWARN(WARN_MALLOC), "%s",
1951 "Bad realloc() ignored");
1952 }
1953 #endif
1954 return NULL; /* sanity */
1955 }
1957 onb = BUCKET_SIZE_REAL(bucket);
1958 /*
1959 * Avoid the copy if the new size maps to the same bucket.
1960 * We are not aggressive with boundary cases. Note that this might
1961 * (in a small number of cases) give a false negative if both the
1962 * new size and the old one are in the bucket for FIRST_BIG_POW2,
1963 * but the new one is near the lower end.
1964 *
1965 * So far we do not try to move down to a 1.5 times smaller bucket.
1966 */
1967 if (nbytes > onb) incr = 1;
1968 else {
1969 #ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
1970 if ( /* This is a little bit pessimal if PACK_MALLOC: */
1971 nbytes > ( (onb >> 1) - M_OVERHEAD )
1972 # ifdef TWO_POT_OPTIMIZE
1973 || (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
1974 # endif
1975 )
1976 #else /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1977 prev_bucket = ( (bucket > MAX_PACKED + 1)
1978 ? bucket - BUCKETS_PER_POW2
1979 : bucket - 1);
1980 if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
1981 #endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
1982 incr = 0;
1983 else incr = -1;
1984 }
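/*
 * Illustrative values (editor's, hypothetical bucket sizes): if the
 * current bucket holds onb == 4080 usable bytes and the previous one
 * 2032, then nbytes == 5000 gives incr == 1 (must grow), nbytes == 3000
 * gives incr == 0 (reuse in place), and nbytes == 1000 gives
 * incr == -1 (worth moving to a smaller bucket).
 */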
1985 #ifdef STRESS_REALLOC
1986 goto hard_way;
1987 #endif
1988 if (incr == 0) {
1989 inplace_label:
1990 #ifdef RCHECK
1991 /*
1992 * Record new allocated size of block and
1993 * bound space with magic numbers.
1994 */
1995 if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
1996 int i, nb = ovp->ov_size + 1;
1998 if ((i = nb & (RMAGIC_SZ-1))) {
1999 i = RMAGIC_SZ - i;
2000 while (i--) { /* nb - RMAGIC_SZ is end of alloced area */
2001 ASSERT(((caddr_t)ovp + nb - RMAGIC_SZ)[i] == RMAGIC_C, "chunk's tail overwrite");
2002 }
2003 }
2004 /* Same at RMAGIC_SZ-aligned RMAGIC */
2005 nb = (nb + (RMAGIC_SZ-1)) & ~(RMAGIC_SZ-1);
2006 ASSERT(((u_int *)((caddr_t)ovp + nb))[-1] == RMAGIC,
2007 "chunk's tail overwrite");
2008 FILLCHECK_DEADBEEF((unsigned char*)((caddr_t)ovp + nb),
2009 BUCKET_SIZE(OV_INDEX(ovp)) - nb);
2010 if (nbytes > ovp->ov_size + 1 - M_OVERHEAD)
2011 FILL_FEEDADAD((unsigned char*)cp + ovp->ov_size + 1 - M_OVERHEAD,
2012 nbytes - (ovp->ov_size + 1 - M_OVERHEAD));
2013 else
2014 FILL_DEADBEEF((unsigned char*)cp + nbytes,
2015 nb - M_OVERHEAD + RMAGIC_SZ - nbytes);
2016 /*
2017 * Convert amount of memory requested into
2018 * closest block size stored in hash buckets
2019 * which satisfies request. Account for
2020 * space used per block for accounting.
2021 */
2022 nbytes += M_OVERHEAD;
2023 ovp->ov_size = nbytes - 1;
2024 if ((i = nbytes & (RMAGIC_SZ-1))) {
2025 i = RMAGIC_SZ - i;
2026 while (i--) /* nbytes - RMAGIC_SZ is end of alloced area */
2027 ((caddr_t)ovp + nbytes - RMAGIC_SZ)[i]
2028 = RMAGIC_C;
2029 }
2030 /* Same at RMAGIC_SZ-aligned RMAGIC */
2031 nbytes = (nbytes + (RMAGIC_SZ-1)) & ~(RMAGIC_SZ - 1);
2032 ((u_int *)((caddr_t)ovp + nbytes))[-1] = RMAGIC;
2033 }
2034 #endif
2035 res = cp;
2036 DEBUG_m(PerlIO_printf(Perl_debug_log,
2037 "0x%" UVxf ": (%05lu) realloc %ld bytes inplace\n",
2038 PTR2UV(res),(unsigned long)(PL_an++),
2039 (long)size));
2040 } else if (incr == 1 && (cp - M_OVERHEAD == last_op)
2041 && (onb > (1 << LOG_OF_MIN_ARENA))) {
2042 MEM_SIZE require, newarena = nbytes, pow;
2043 int shiftr;
2045 POW2_OPTIMIZE_ADJUST(newarena);
2046 newarena = newarena + M_OVERHEAD;
2047 /* newarena = (newarena + 3) &~ 3; */
2048 shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
2049 pow = LOG_OF_MIN_ARENA + 1;
2050 /* apart from this loop, this is O(1) */
2051 while (shiftr >>= 1)
2052 pow++;
2053 newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
2054 require = newarena - onb - M_OVERHEAD;
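/*
 * Worked example (editor's, assuming LOG_OF_MIN_ARENA == 11 and no
 * POW2_OPTIMIZE surplus): newarena == 5000 gives
 * shiftr == 4999 >> 11 == 2, the loop bumps pow from 12 to 13, and the
 * chunk is grown to newarena == 1 << 13 == 8192 bytes.
 */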
2056 MALLOC_LOCK;
2057 if (cp - M_OVERHEAD == last_op /* We *still* are the last chunk */
2058 && getpages_adjacent(require)) {
2059 #ifdef DEBUGGING_MSTATS
2060 nmalloc[bucket]--;
2061 nmalloc[pow * BUCKETS_PER_POW2]++;
2062 #endif
2063 if (pow * BUCKETS_PER_POW2 > (MEM_SIZE)max_bucket)
2064 max_bucket = pow * BUCKETS_PER_POW2;
2065 *(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
2066 MALLOC_UNLOCK;
2067 goto inplace_label;
2068 } else {
2069 MALLOC_UNLOCK;
2070 goto hard_way;
2071 }
2072 } else {
2073 hard_way:
2074 DEBUG_m(PerlIO_printf(Perl_debug_log,
2075 "0x%" UVxf ": (%05lu) realloc %ld bytes the hard way\n",
2076 PTR2UV(cp),(unsigned long)(PL_an++),
2077 (long)size));
2078 if ((res = (char*)Perl_malloc(nbytes)) == NULL)
2079 return (NULL);
2080 if (cp != res) /* common optimization */
2081 Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
2082 Perl_mfree(cp);
2083 }
2084 return ((Malloc_t)res);
2085 }
2087 Malloc_t
2088 Perl_calloc(size_t elements, size_t size)
2089 {
2090 long sz = elements * size;
2091 Malloc_t p = Perl_malloc(sz);
2093 if (p) {
2094 memset((void*)p, 0, sz);
2095 }
2096 return p;
2097 }
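/*
 * Editor's note: the multiplication above is not checked for overflow.
 * A minimal sketch of a guarded variant (illustrative only):
 *
 *	if (size && elements > (size_t)-1 / size)
 *	    return NULL;	// elements * size would overflow
 *	sz = elements * size;
 */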
2099 char *
2100 Perl_strdup(const char *s)
2101 {
2102 MEM_SIZE l = strlen(s);
2103 char *s1 = (char *)Perl_malloc(l+1);
2105 return (char *)CopyD(s, s1, (MEM_SIZE)(l+1), char);
2106 }
2108 int
2109 Perl_putenv(char *a)
2110 {
2111 /* Sometimes the system's putenv() conflicts with my_setenv(): the string
2112 would be allocated by the system malloc but freed by Perl's free(). */
2113 dTHX;
2114 char *var;
2115 char *val = a;
2116 MEM_SIZE l;
2117 char buf[80];
2119 while (*val && *val != '=')
2120 val++;
2121 if (!*val)
2122 return -1;
2123 l = val - a;
2124 if (l < sizeof(buf))
2125 var = buf;
2126 else
2127 var = (char *)Perl_malloc(l + 1);
2128 Copy(a, var, l, char);
2129 var[l] = 0; /* NUL-terminate the l copied characters */
2130 my_setenv(var, val+1);
2131 if (var != buf)
2132 Perl_mfree(var);
2133 return 0;
2134 }
2136 MEM_SIZE
2137 Perl_malloced_size(void *p)
2138 {
2139 union overhead * const ovp = (union overhead *)
2140 ((caddr_t)p - sizeof (union overhead) * CHUNK_SHIFT);
2141 const int bucket = OV_INDEX(ovp);
2143 PERL_ARGS_ASSERT_MALLOCED_SIZE;
2145 #ifdef RCHECK
2146 /* The caller wants complete control over the chunk; disable the
2147 memory checking inside it. */
2148 if (bucket <= MAX_SHORT_BUCKET) {
2149 const MEM_SIZE size = BUCKET_SIZE_REAL(bucket);
2150 ovp->ov_size = size + M_OVERHEAD - 1;
2151 *((u_int *)((caddr_t)ovp + size + M_OVERHEAD - RMAGIC_SZ)) = RMAGIC;
2152 }
2153 #endif
2154 return BUCKET_SIZE_REAL(bucket);
2155 }
2158 MEM_SIZE
2159 Perl_malloc_good_size(size_t wanted)
2160 {
2161 return BUCKET_SIZE_REAL(adjust_size_and_find_bucket(&wanted));
2162 }
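/*
 * Illustrative use (editor's, hypothetical sizes): a caller that wants
 * 100 bytes can ask how much the chosen bucket really provides and size
 * its buffer to that for free:
 *
 *	size_t want = 100;
 *	size_t got  = Perl_malloc_good_size(want);  // e.g. 124 on some builds
 *	char  *p    = (char *)Perl_malloc(got);
 */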
2164 # ifdef BUCKETS_ROOT2
2165 # define MIN_EVEN_REPORT 6
2166 # else
2167 # define MIN_EVEN_REPORT MIN_BUCKET
2168 # endif
2170 int
2171 Perl_get_mstats(pTHX_ perl_mstats_t *buf, int buflen, int level)
2172 {
2173 #ifdef DEBUGGING_MSTATS
2174 int i, j;
2175 union overhead *p;
2176 struct chunk_chain_s* nextchain;
2178 PERL_ARGS_ASSERT_GET_MSTATS;
2180 buf->topbucket = buf->topbucket_ev = buf->topbucket_odd
2181 = buf->totfree = buf->total = buf->total_chain = 0;
2183 buf->minbucket = MIN_BUCKET;
2184 MALLOC_LOCK;
2185 for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
2186 for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
2187 ;
2188 if (i < buflen) {
2189 buf->nfree[i] = j;
2190 buf->ntotal[i] = nmalloc[i];
2191 }
2192 buf->totfree += j * BUCKET_SIZE_REAL(i);
2193 buf->total += nmalloc[i] * BUCKET_SIZE_REAL(i);
2194 if (nmalloc[i]) {
2195 i % 2 ? (buf->topbucket_odd = i) : (buf->topbucket_ev = i);
2196 buf->topbucket = i;
2197 }
2198 }
2199 nextchain = chunk_chain;
2200 while (nextchain) {
2201 buf->total_chain += nextchain->size;
2202 nextchain = nextchain->next;
2203 }
2204 buf->total_sbrk = goodsbrk + sbrk_slack;
2205 buf->sbrks = sbrks;
2206 buf->sbrk_good = sbrk_goodness;
2207 buf->sbrk_slack = sbrk_slack;
2208 buf->start_slack = start_slack;
2209 buf->sbrked_remains = sbrked_remains;
2210 MALLOC_UNLOCK;
2211 buf->nbuckets = NBUCKETS;
2212 if (level) {
2213 for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
2214 if (i >= buflen)
2215 break;
2216 buf->bucket_mem_size[i] = BUCKET_SIZE_NO_SURPLUS(i);
2217 buf->bucket_available_size[i] = BUCKET_SIZE_REAL(i);
2218 }
2219 }
2220 #else /* defined DEBUGGING_MSTATS */
2221 PerlIO_printf(Perl_error_log, "perl not compiled with DEBUGGING_MSTATS\n");
2222 #endif /* defined DEBUGGING_MSTATS */
2223 return 0; /* XXX unused */
2224 }
2225 /*
2226 * mstats - print out statistics about malloc
2227 *
2228 * Prints two lines of numbers, one showing the length of the free list
2229 * for each size category, the second showing the number of mallocs -
2230 * frees for each size category.
2231 */
2232 void
2233 Perl_dump_mstats(pTHX_ const char *s)
2234 {
2235 #ifdef DEBUGGING_MSTATS
2236 int i;
2237 perl_mstats_t buffer;
2238 UV nf[NBUCKETS];
2239 UV nt[NBUCKETS];
2241 PERL_ARGS_ASSERT_DUMP_MSTATS;
2243 buffer.nfree = nf;
2244 buffer.ntotal = nt;
2245 get_mstats(&buffer, NBUCKETS, 0);
2247 if (s)
2248 PerlIO_printf(Perl_error_log,
2249 "Memory allocation statistics %s (buckets %" IVdf
2250 "(%" IVdf ")..%" IVdf "(%" IVdf ")\n",
2251 s,
2252 (IV)BUCKET_SIZE_REAL(MIN_BUCKET),
2253 (IV)BUCKET_SIZE_NO_SURPLUS(MIN_BUCKET),
2254 (IV)BUCKET_SIZE_REAL(buffer.topbucket),
2255 (IV)BUCKET_SIZE_NO_SURPLUS(buffer.topbucket));
2256 PerlIO_printf(Perl_error_log, "%8" IVdf " free:", buffer.totfree);
2257 for (i = MIN_EVEN_REPORT; i <= buffer.topbucket; i += BUCKETS_PER_POW2) {
2258 PerlIO_printf(Perl_error_log,
2259 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
2260 ? " %5"UVuf
2261 : ((i < 12*BUCKETS_PER_POW2) ? " %3"UVuf : " %"UVuf)),
2262 buffer.nfree[i]);
2263 }
2264 #ifdef BUCKETS_ROOT2
2265 PerlIO_printf(Perl_error_log, "\n\t ");
2266 for (i = MIN_BUCKET + 1; i <= buffer.topbucket_odd; i += BUCKETS_PER_POW2) {
2267 PerlIO_printf(Perl_error_log,
2268 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
2269 ? " %5"UVuf
2270 : ((i < 12*BUCKETS_PER_POW2) ? " %3"UVuf : " %"UVuf)),
2271 buffer.nfree[i]);
2272 }
2273 #endif
2274 PerlIO_printf(Perl_error_log, "\n%8" IVdf " used:",
2275 buffer.total - buffer.totfree);
2276 for (i = MIN_EVEN_REPORT; i <= buffer.topbucket; i += BUCKETS_PER_POW2) {
2277 PerlIO_printf(Perl_error_log,
2278 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
2279 ? " %5"IVdf
2280 : ((i < 12*BUCKETS_PER_POW2) ? " %3"IVdf : " %"IVdf)),
2281 buffer.ntotal[i] - buffer.nfree[i]);
2282 }
2283 #ifdef BUCKETS_ROOT2
2284 PerlIO_printf(Perl_error_log, "\n\t ");
2285 for (i = MIN_BUCKET + 1; i <= buffer.topbucket_odd; i += BUCKETS_PER_POW2) {
2286 PerlIO_printf(Perl_error_log,
2287 ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
2288 ? " %5"IVdf
2289 : ((i < 12*BUCKETS_PER_POW2) ? " %3"IVdf : " %"IVdf)),
2290 buffer.ntotal[i] - buffer.nfree[i]);
2291 }
2292 #endif
2293 PerlIO_printf(Perl_error_log, "\nTotal sbrk(): %" IVdf "/%" IVdf ":%"
2294 IVdf ". Odd ends: pad+heads+chain+tail: %" IVdf "+%"
2295 IVdf "+%" IVdf "+%" IVdf ".\n",
2296 buffer.total_sbrk, buffer.sbrks, buffer.sbrk_good,
2297 buffer.sbrk_slack, buffer.start_slack,
2298 buffer.total_chain, buffer.sbrked_remains);
2299 #else /* DEBUGGING_MSTATS */
2300 PerlIO_printf(Perl_error_log, "%s: perl not compiled with DEBUGGING_MSTATS\n",s);
2301 #endif /* DEBUGGING_MSTATS */
2302 }
2304 #ifdef USE_PERL_SBRK
2306 # if defined(PURIFY)
2307 # define PERL_SBRK_VIA_MALLOC
2308 # endif
2310 # ifdef PERL_SBRK_VIA_MALLOC
2312 /* It may seem schizophrenic to use perl's malloc and let it call the */
2313 /* system malloc; the only reason is that version 3.2 of the OS had */
2314 /* frequent core dumps within nxzonefreenolock. This sbrk routine put */
2315 /* an end to the cores */
2317 # ifndef SYSTEM_ALLOC
2318 # define SYSTEM_ALLOC(a) malloc(a)
2319 # endif
2320 # ifndef SYSTEM_ALLOC_ALIGNMENT
2321 # define SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES
2322 # endif
2324 # endif /* PERL_SBRK_VIA_MALLOC */
2326 static IV Perl_sbrk_oldchunk;
2327 static long Perl_sbrk_oldsize;
2329 # define PERLSBRK_32_K (1<<15)
2330 # define PERLSBRK_64_K (1<<16)
2332 Malloc_t
2333 Perl_sbrk(int size)
2334 {
2335 IV got;
2336 int small, reqsize;
2338 if (!size) return 0;
2339 reqsize = size; /* for the DEBUG_m statement, and to register the leftover below */
2340 #ifdef PACK_MALLOC
2341 size = (size + 0x7ff) & ~0x7ff;
2342 #endif
2343 if (size <= Perl_sbrk_oldsize) {
2344 got = Perl_sbrk_oldchunk;
2345 Perl_sbrk_oldchunk += size;
2346 Perl_sbrk_oldsize -= size;
2347 } else {
2348 if (size >= PERLSBRK_32_K) {
2349 small = 0;
2350 } else {
2351 size = PERLSBRK_64_K;
2352 small = 1;
2353 }
2354 # if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT
2355 size += NEEDED_ALIGNMENT - SYSTEM_ALLOC_ALIGNMENT;
2356 # endif
2357 got = (IV)SYSTEM_ALLOC(size);
2358 # if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT
2359 got = (got + NEEDED_ALIGNMENT - 1) & ~(NEEDED_ALIGNMENT - 1);
2360 # endif
2361 if (small) {
2362 /* Chunk is small, register the rest for future allocs. */
2363 Perl_sbrk_oldchunk = got + reqsize;
2364 Perl_sbrk_oldsize = size - reqsize;
2365 }
2366 }
2368 DEBUG_m(PerlIO_printf(Perl_debug_log,
2369 "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%"
2370 UVxf "\n",
2371 (long)size, (long)reqsize, Perl_sbrk_oldsize, PTR2UV(got)));
2373 return (void *)got;
2374 }
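/*
 * Worked example (editor's, PACK_MALLOC build): a first request of
 * 5000 bytes is rounded to (5000 + 0x7ff) & ~0x7ff == 6144, which is
 * below PERLSBRK_32_K, so a full PERLSBRK_64_K chunk comes from
 * SYSTEM_ALLOC; the caller gets its start, and the remaining
 * 65536 - 5000 bytes at got + reqsize are kept to serve later small
 * requests without another system call.
 */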
2376 #endif /* USE_PERL_SBRK */
2378 /*
2379 * ex: set ts=8 sts=4 sw=4 et:
2380 */