mirror of
https://github.com/HackTricks-wiki/hacktricks.git
synced 2025-10-10 18:36:50 +00:00
1699 lines
49 KiB
Markdown
1699 lines
49 KiB
Markdown
# malloc & sysmalloc
|
|
|
|
{{#include ../../../banners/hacktricks-training.md}}
|
|
|
|
## Rezime Redosleda Alokacije <a href="#libc_malloc" id="libc_malloc"></a>
|
|
|
|
(Nema provere objašnjenih u ovom rezimeu i neki slučajevi su izostavljeni radi sažetosti)
|
|
|
|
1. `__libc_malloc` pokušava da dobije deo iz tcache, ako ne uspe, poziva `_int_malloc`
|
|
2. `_int_malloc` : 
|
|
1. Pokušava da generiše arenu ako ne postoji
|
|
2. Ako postoji bilo koji fast bin deo odgovarajuće veličine, koristi ga
|
|
1. Popunjava tcache sa drugim brzim delovima
|
|
3. Ako postoji bilo koji small bin deo odgovarajuće veličine, koristi ga
|
|
1. Popunjava tcache sa drugim delovima te veličine
|
|
4. Ako tražena veličina nije za small bins, konsoliduje fast bin u nesortirani bin
|
|
5. Proverava nesortirani bin, koristi prvi deo sa dovoljno prostora
|
|
1. Ako je pronađeni deo veći, podeli ga da vrati deo i dodaj ostatak nazad u nesortirani bin
|
|
2. Ako je deo iste veličine kao tražena veličina, koristi ga da popuni tcache umesto da ga vrati (dok tcache ne bude pun, onda vrati sledeći)
|
|
3. Za svaki deo manje veličine koji se proverava, stavi ga u odgovarajući small ili large bin
|
|
6. Proverava large bin u indeksu tražene veličine
|
|
1. Počinje da gleda od prvog dela koji je veći od tražene veličine, ako se pronađe, vrati ga i dodaj ostatke u small bin
|
|
7. Proverava large bins od sledećih indeksa do kraja
|
|
1. Od sledećeg većeg indeksa proverava bilo koji deo, podeli prvi pronađeni deo da ga koristi za traženu veličinu i dodaj ostatak u nesortirani bin
|
|
8. Ako ništa nije pronađeno u prethodnim binovima, uzmi deo iz top chunk
|
|
9. Ako top chunk nije bio dovoljno velik, povećaj ga sa `sysmalloc`
|
|
|
|
## \_\_libc_malloc <a href="#libc_malloc" id="libc_malloc"></a>
|
|
|
|
Funkcija `malloc` zapravo poziva `__libc_malloc`. Ova funkcija će proveriti tcache da vidi da li postoji bilo koji dostupni deo željene veličine. Ako postoji, koristiće ga, a ako ne, proveriće da li je u pitanju jedinstvena nit i u tom slučaju će pozvati `_int_malloc` u glavnoj areni, a ako ne, pozvaće `_int_malloc` u areni niti.
|
|
|
|
<details>
|
|
|
|
<summary>__libc_malloc kod</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c
|
|
|
|
#if IS_IN (libc)
|
|
void *
|
|
__libc_malloc (size_t bytes)
|
|
{
|
|
mstate ar_ptr;
|
|
void *victim;
|
|
|
|
_Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2,
|
|
"PTRDIFF_MAX is not more than half of SIZE_MAX");
|
|
|
|
if (!__malloc_initialized)
|
|
ptmalloc_init ();
|
|
#if USE_TCACHE
|
|
/* int_free also calls request2size, be careful to not pad twice. */
|
|
size_t tbytes = checked_request2size (bytes);
|
|
if (tbytes == 0)
|
|
{
|
|
__set_errno (ENOMEM);
|
|
return NULL;
|
|
}
|
|
size_t tc_idx = csize2tidx (tbytes);
|
|
|
|
MAYBE_INIT_TCACHE ();
|
|
|
|
DIAG_PUSH_NEEDS_COMMENT;
|
|
if (tc_idx < mp_.tcache_bins
|
|
&& tcache != NULL
|
|
&& tcache->counts[tc_idx] > 0)
|
|
{
|
|
victim = tcache_get (tc_idx);
|
|
return tag_new_usable (victim);
|
|
}
|
|
DIAG_POP_NEEDS_COMMENT;
|
|
#endif
|
|
|
|
if (SINGLE_THREAD_P)
|
|
{
|
|
victim = tag_new_usable (_int_malloc (&main_arena, bytes));
|
|
assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
|
|
&main_arena == arena_for_chunk (mem2chunk (victim)));
|
|
return victim;
|
|
}
|
|
|
|
arena_get (ar_ptr, bytes);
|
|
|
|
victim = _int_malloc (ar_ptr, bytes);
|
|
/* Retry with another arena only if we were able to find a usable arena
|
|
before. */
|
|
if (!victim && ar_ptr != NULL)
|
|
{
|
|
LIBC_PROBE (memory_malloc_retry, 1, bytes);
|
|
ar_ptr = arena_get_retry (ar_ptr, bytes);
|
|
victim = _int_malloc (ar_ptr, bytes);
|
|
}
|
|
|
|
if (ar_ptr != NULL)
|
|
__libc_lock_unlock (ar_ptr->mutex);
|
|
|
|
victim = tag_new_usable (victim);
|
|
|
|
assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
|
|
ar_ptr == arena_for_chunk (mem2chunk (victim)));
|
|
return victim;
|
|
}
|
|
```
|
|
</details>
|
|
|
|
Napomena kako će uvek označiti vraćeni pokazivač sa `tag_new_usable`, iz koda:
|
|
```c
|
|
void *tag_new_usable (void *ptr)
|
|
|
|
Allocate a new random color and use it to color the user region of
|
|
a chunk; this may include data from the subsequent chunk's header
|
|
if tagging is sufficiently fine grained. Returns PTR suitably
|
|
recolored for accessing the memory there.
|
|
```
|
|
## \_int_malloc <a href="#int_malloc" id="int_malloc"></a>
|
|
|
|
Ovo je funkcija koja alocira memoriju koristeći druge binove i top chunk.
|
|
|
|
- Početak
|
|
|
|
Počinje definisanjem nekih varijabli i dobijanjem stvarne veličine koju traženi prostor za memoriju treba da ima:
|
|
|
|
<details>
|
|
|
|
<summary>_int_malloc start</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3847
|
|
static void *
|
|
_int_malloc (mstate av, size_t bytes)
|
|
{
|
|
INTERNAL_SIZE_T nb; /* normalized request size */
|
|
unsigned int idx; /* associated bin index */
|
|
mbinptr bin; /* associated bin */
|
|
|
|
mchunkptr victim; /* inspected/selected chunk */
|
|
INTERNAL_SIZE_T size; /* its size */
|
|
int victim_index; /* its bin index */
|
|
|
|
mchunkptr remainder; /* remainder from a split */
|
|
unsigned long remainder_size; /* its size */
|
|
|
|
unsigned int block; /* bit map traverser */
|
|
unsigned int bit; /* bit map traverser */
|
|
unsigned int map; /* current word of binmap */
|
|
|
|
mchunkptr fwd; /* misc temp for linking */
|
|
mchunkptr bck; /* misc temp for linking */
|
|
|
|
#if USE_TCACHE
|
|
size_t tcache_unsorted_count; /* count of unsorted chunks processed */
|
|
#endif
|
|
|
|
/*
|
|
Convert request size to internal form by adding SIZE_SZ bytes
|
|
overhead plus possibly more to obtain necessary alignment and/or
|
|
to obtain a size of at least MINSIZE, the smallest allocatable
|
|
size. Also, checked_request2size returns false for request sizes
|
|
that are so large that they wrap around zero when padded and
|
|
aligned.
|
|
*/
|
|
|
|
nb = checked_request2size (bytes);
|
|
if (nb == 0)
|
|
{
|
|
__set_errno (ENOMEM);
|
|
return NULL;
|
|
}
|
|
```
|
|
</details>
|
|
|
|
### Arena
|
|
|
|
U malo verovatnom slučaju da ne postoje upotrebljive arene, koristi `sysmalloc` da dobije deo iz `mmap`:
|
|
|
|
<details>
|
|
|
|
<summary>_int_malloc not arena</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3885C3-L3893C6
|
|
/* There are no usable arenas. Fall back to sysmalloc to get a chunk from
|
|
mmap. */
|
|
if (__glibc_unlikely (av == NULL))
|
|
{
|
|
void *p = sysmalloc (nb, av);
|
|
if (p != NULL)
|
|
alloc_perturb (p, bytes);
|
|
return p;
|
|
}
|
|
```
|
|
</details>
|
|
|
|
### Fast Bin
|
|
|
|
Ako je potrebna veličina unutar veličina Fast Bins, pokušajte da koristite deo iz fast bin. U suštini, na osnovu veličine, pronaći će indeks fast bin-a gde bi validni delovi trebali biti locirani, i ako ih ima, vratiće jedan od njih.\
|
|
Štaviše, ako je tcache omogućena, **napuniće tcache bin te veličine sa fast bins**.
|
|
|
|
Tokom izvođenja ovih akcija, izvršavaju se neke bezbednosne provere:
|
|
|
|
- Ako je deo neusklađen: `malloc(): unaligned fastbin chunk detected 2`
|
|
- Ako je napredni deo neusklađen: `malloc(): unaligned fastbin chunk detected`
|
|
- Ako vraćeni deo ima veličinu koja nije ispravna zbog svog indeksa u fast bin: `malloc(): memory corruption (fast)`
|
|
- Ako je bilo koji deo korišćen za punjenje tcache neusklađen: `malloc(): unaligned fastbin chunk detected 3`
|
|
|
|
<details>
|
|
|
|
<summary>_int_malloc fast bin</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3895C3-L3967C6
|
|
/*
|
|
If the size qualifies as a fastbin, first check corresponding bin.
|
|
This code is safe to execute even if av is not yet initialized, so we
|
|
can try it without checking, which saves some time on this fast path.
|
|
*/
|
|
|
|
#define REMOVE_FB(fb, victim, pp) \
|
|
do \
|
|
{ \
|
|
victim = pp; \
|
|
if (victim == NULL) \
|
|
break; \
|
|
pp = REVEAL_PTR (victim->fd); \
|
|
if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \
|
|
malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
|
|
} \
|
|
while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
|
|
!= victim); \
|
|
|
|
if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
|
|
{
|
|
idx = fastbin_index (nb);
|
|
mfastbinptr *fb = &fastbin (av, idx);
|
|
mchunkptr pp;
|
|
victim = *fb;
|
|
|
|
if (victim != NULL)
|
|
{
|
|
if (__glibc_unlikely (misaligned_chunk (victim)))
|
|
malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");
|
|
|
|
if (SINGLE_THREAD_P)
|
|
*fb = REVEAL_PTR (victim->fd);
|
|
else
|
|
REMOVE_FB (fb, pp, victim);
|
|
if (__glibc_likely (victim != NULL))
|
|
{
|
|
size_t victim_idx = fastbin_index (chunksize (victim));
|
|
if (__builtin_expect (victim_idx != idx, 0))
|
|
malloc_printerr ("malloc(): memory corruption (fast)");
|
|
check_remalloced_chunk (av, victim, nb);
|
|
#if USE_TCACHE
|
|
/* While we're here, if we see other chunks of the same size,
|
|
stash them in the tcache. */
|
|
size_t tc_idx = csize2tidx (nb);
|
|
if (tcache != NULL && tc_idx < mp_.tcache_bins)
|
|
{
|
|
mchunkptr tc_victim;
|
|
|
|
/* While bin not empty and tcache not full, copy chunks. */
|
|
while (tcache->counts[tc_idx] < mp_.tcache_count
|
|
&& (tc_victim = *fb) != NULL)
|
|
{
|
|
if (__glibc_unlikely (misaligned_chunk (tc_victim)))
|
|
malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
|
|
if (SINGLE_THREAD_P)
|
|
*fb = REVEAL_PTR (tc_victim->fd);
|
|
else
|
|
{
|
|
REMOVE_FB (fb, pp, tc_victim);
|
|
if (__glibc_unlikely (tc_victim == NULL))
|
|
break;
|
|
}
|
|
tcache_put (tc_victim, tc_idx);
|
|
}
|
|
}
|
|
#endif
|
|
void *p = chunk2mem (victim);
|
|
alloc_perturb (p, bytes);
|
|
return p;
|
|
}
|
|
}
|
|
}
|
|
```
|
|
</details>
|
|
|
|
### Small Bin
|
|
|
|
Kao što je naznačeno u komentaru, mali binovi drže jednu veličinu po indeksu, tako da je provera da li je dostupna validna chunk veoma brza, pa se nakon brzih binova proveravaju mali binovi.
|
|
|
|
Prva provera je da se utvrdi da li tražena veličina može biti unutar malog bina. U tom slučaju, uzmite odgovarajući **indeks** unutar smallbina i proverite da li postoji **bilo koja dostupna chunk**.
|
|
|
|
Zatim se vrši bezbednosna provera:
|
|
|
|
- Provera da li je `victim->bk->fd == victim`. Da se vidi da su oba chunk-a ispravno povezana.
|
|
|
|
U tom slučaju, chunk **dobija `inuse` bit,** dvostruko povezana lista se ispravlja tako da ovaj chunk nestaje iz nje (jer će biti korišćen), a bit za ne glavnu arenu se postavlja ako je potrebno.
|
|
|
|
Na kraju, **popunite tcache indeks tražene veličine** sa drugim chunk-ovima unutar malog bina (ako ih ima).
|
|
|
|
<details>
|
|
|
|
<summary>_int_malloc small bin</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3895C3-L3967C6
|
|
|
|
/*
|
|
If a small request, check regular bin. Since these "smallbins"
|
|
hold one size each, no searching within bins is necessary.
|
|
(For a large request, we need to wait until unsorted chunks are
|
|
processed to find best fit. But for small ones, fits are exact
|
|
anyway, so we can check now, which is faster.)
|
|
*/
|
|
|
|
if (in_smallbin_range (nb))
|
|
{
|
|
idx = smallbin_index (nb);
|
|
bin = bin_at (av, idx);
|
|
|
|
if ((victim = last (bin)) != bin)
|
|
{
|
|
bck = victim->bk;
|
|
if (__glibc_unlikely (bck->fd != victim))
|
|
malloc_printerr ("malloc(): smallbin double linked list corrupted");
|
|
set_inuse_bit_at_offset (victim, nb);
|
|
bin->bk = bck;
|
|
bck->fd = bin;
|
|
|
|
if (av != &main_arena)
|
|
set_non_main_arena (victim);
|
|
check_malloced_chunk (av, victim, nb);
|
|
#if USE_TCACHE
|
|
/* While we're here, if we see other chunks of the same size,
|
|
stash them in the tcache. */
|
|
size_t tc_idx = csize2tidx (nb);
|
|
if (tcache != NULL && tc_idx < mp_.tcache_bins)
|
|
{
|
|
mchunkptr tc_victim;
|
|
|
|
/* While bin not empty and tcache not full, copy chunks over. */
|
|
while (tcache->counts[tc_idx] < mp_.tcache_count
|
|
&& (tc_victim = last (bin)) != bin)
|
|
{
|
|
if (tc_victim != 0)
|
|
{
|
|
bck = tc_victim->bk;
|
|
set_inuse_bit_at_offset (tc_victim, nb);
|
|
if (av != &main_arena)
|
|
set_non_main_arena (tc_victim);
|
|
bin->bk = bck;
|
|
bck->fd = bin;
|
|
|
|
tcache_put (tc_victim, tc_idx);
|
|
}
|
|
}
|
|
}
|
|
#endif
|
|
void *p = chunk2mem (victim);
|
|
alloc_perturb (p, bytes);
|
|
return p;
|
|
}
|
|
}
|
|
```
|
|
</details>
|
|
|
|
### malloc_consolidate
|
|
|
|
Ako to nije bio mali deo, to je veliki deo, i u ovom slučaju **`malloc_consolidate`** se poziva da bi se izbegla fragmentacija memorije.
|
|
|
|
<details>
|
|
|
|
<summary>malloc_consolidate poziv</summary>
|
|
```c
|
|
/*
|
|
If this is a large request, consolidate fastbins before continuing.
|
|
While it might look excessive to kill all fastbins before
|
|
even seeing if there is space available, this avoids
|
|
fragmentation problems normally associated with fastbins.
|
|
Also, in practice, programs tend to have runs of either small or
|
|
large requests, but less often mixtures, so consolidation is not
|
|
invoked all that often in most programs. And the programs that
|
|
it is called frequently in otherwise tend to fragment.
|
|
*/
|
|
|
|
else
|
|
{
|
|
idx = largebin_index (nb);
|
|
if (atomic_load_relaxed (&av->have_fastchunks))
|
|
malloc_consolidate (av);
|
|
}
|
|
|
|
```
|
|
</details>
|
|
|
|
Funkcija malloc consolidate u suštini uklanja delove iz brze kante i smešta ih u nesortiranu kantu. Nakon sledećeg malloc-a, ovi delovi će biti organizovani u svojim odgovarajućim malim/brzim kantama.
|
|
|
|
Napomena: ako se prilikom uklanjanja ovih delova otkriju prethodni ili sledeći delovi koji nisu u upotrebi, oni će biti **unlinkovani i spojeni** pre nego što se konačni deo stavi u **nesortiranu** kantu.
|
|
|
|
Za svaki deo iz brze kante vrši se nekoliko bezbednosnih provera:
|
|
|
|
- Ako je deo neporavnat: `malloc_consolidate(): unaligned fastbin chunk detected`
|
|
- Ako deo ima drugačiju veličinu od one koju bi trebao imati zbog indeksa u kojem se nalazi: `malloc_consolidate(): invalid chunk size`
|
|
- Ako prethodni deo nije u upotrebi i prethodni deo ima veličinu koja se razlikuje od one koju označava `prev_chunk`: `corrupted size vs. prev_size in fastbins`
|
|
|
|
<details>
|
|
|
|
<summary>malloc_consolidate funkcija</summary>
|
|
```c
|
|
// https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4810C1-L4905C2
|
|
|
|
static void malloc_consolidate(mstate av)
|
|
{
|
|
mfastbinptr* fb; /* current fastbin being consolidated */
|
|
mfastbinptr* maxfb; /* last fastbin (for loop control) */
|
|
mchunkptr p; /* current chunk being consolidated */
|
|
mchunkptr nextp; /* next chunk to consolidate */
|
|
mchunkptr unsorted_bin; /* bin header */
|
|
mchunkptr first_unsorted; /* chunk to link to */
|
|
|
|
/* These have same use as in free() */
|
|
mchunkptr nextchunk;
|
|
INTERNAL_SIZE_T size;
|
|
INTERNAL_SIZE_T nextsize;
|
|
INTERNAL_SIZE_T prevsize;
|
|
int nextinuse;
|
|
|
|
atomic_store_relaxed (&av->have_fastchunks, false);
|
|
|
|
unsorted_bin = unsorted_chunks(av);
|
|
|
|
/*
|
|
Remove each chunk from fast bin and consolidate it, placing it
|
|
then in unsorted bin. Among other reasons for doing this,
|
|
placing in unsorted bin avoids needing to calculate actual bins
|
|
until malloc is sure that chunks aren't immediately going to be
|
|
reused anyway.
|
|
*/
|
|
|
|
maxfb = &fastbin (av, NFASTBINS - 1);
|
|
fb = &fastbin (av, 0);
|
|
do {
|
|
p = atomic_exchange_acquire (fb, NULL);
|
|
if (p != 0) {
|
|
do {
|
|
{
|
|
if (__glibc_unlikely (misaligned_chunk (p)))
|
|
malloc_printerr ("malloc_consolidate(): "
|
|
"unaligned fastbin chunk detected");
|
|
|
|
unsigned int idx = fastbin_index (chunksize (p));
|
|
if ((&fastbin (av, idx)) != fb)
|
|
malloc_printerr ("malloc_consolidate(): invalid chunk size");
|
|
}
|
|
|
|
check_inuse_chunk(av, p);
|
|
nextp = REVEAL_PTR (p->fd);
|
|
|
|
/* Slightly streamlined version of consolidation code in free() */
|
|
size = chunksize (p);
|
|
nextchunk = chunk_at_offset(p, size);
|
|
nextsize = chunksize(nextchunk);
|
|
|
|
if (!prev_inuse(p)) {
|
|
prevsize = prev_size (p);
|
|
size += prevsize;
|
|
p = chunk_at_offset(p, -((long) prevsize));
|
|
if (__glibc_unlikely (chunksize(p) != prevsize))
|
|
malloc_printerr ("corrupted size vs. prev_size in fastbins");
|
|
unlink_chunk (av, p);
|
|
}
|
|
|
|
if (nextchunk != av->top) {
|
|
nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
|
|
|
|
if (!nextinuse) {
|
|
size += nextsize;
|
|
unlink_chunk (av, nextchunk);
|
|
} else
|
|
clear_inuse_bit_at_offset(nextchunk, 0);
|
|
|
|
first_unsorted = unsorted_bin->fd;
|
|
unsorted_bin->fd = p;
|
|
first_unsorted->bk = p;
|
|
|
|
if (!in_smallbin_range (size)) {
|
|
p->fd_nextsize = NULL;
|
|
p->bk_nextsize = NULL;
|
|
}
|
|
|
|
set_head(p, size | PREV_INUSE);
|
|
p->bk = unsorted_bin;
|
|
p->fd = first_unsorted;
|
|
set_foot(p, size);
|
|
}
|
|
|
|
else {
|
|
size += nextsize;
|
|
set_head(p, size | PREV_INUSE);
|
|
av->top = p;
|
|
}
|
|
|
|
} while ( (p = nextp) != 0);
|
|
|
|
}
|
|
} while (fb++ != maxfb);
|
|
}
|
|
```
|
|
</details>
|
|
|
|
### Nepoređeni kontejner
|
|
|
|
Vreme je da proverimo nepoređeni kontejner za potencijalno validan deo koji možemo koristiti.
|
|
|
|
#### Početak
|
|
|
|
Ovo počinje velikom for petljom koja će prolaziti kroz nepoređeni kontejner u `bk` pravcu dok ne stigne do kraja (arena struktura) sa `while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))` 
|
|
|
|
Pored toga, neke sigurnosne provere se vrše svaki put kada se razmatra novi deo:
|
|
|
|
- Ako je veličina dela čudna (previše mala ili previše velika): `malloc(): invalid size (unsorted)`
|
|
- Ako je veličina sledećeg dela čudna (previše mala ili previše velika): `malloc(): invalid next size (unsorted)`
|
|
- Ako se prethodna veličina koju označava sledeći deo razlikuje od veličine dela: `malloc(): mismatching next->prev_size (unsorted)`
|
|
- Ako nije `victim->bk->fd == victim` ili nije `victim->fd == av` (arena): `malloc(): unsorted double linked list corrupted`
|
|
- Kako uvek proveravamo poslednji, njegov `fd` bi trebao uvek da pokazuje na arena strukturu.
|
|
- Ako sledeći deo ne ukazuje da je prethodni u upotrebi: `malloc(): invalid next->prev_inuse (unsorted)`
|
|
|
|
<details>
|
|
|
|
<summary><code>_int_malloc</code> početak nepoređenog kontejnera</summary>
|
|
```c
|
|
/*
|
|
Process recently freed or remaindered chunks, taking one only if
|
|
it is exact fit, or, if this a small request, the chunk is remainder from
|
|
the most recent non-exact fit. Place other traversed chunks in
|
|
bins. Note that this step is the only place in any routine where
|
|
chunks are placed in bins.
|
|
|
|
The outer loop here is needed because we might not realize until
|
|
near the end of malloc that we should have consolidated, so must
|
|
do so and retry. This happens at most once, and only when we would
|
|
otherwise need to expand memory to service a "small" request.
|
|
*/
|
|
|
|
#if USE_TCACHE
|
|
INTERNAL_SIZE_T tcache_nb = 0;
|
|
size_t tc_idx = csize2tidx (nb);
|
|
if (tcache != NULL && tc_idx < mp_.tcache_bins)
|
|
tcache_nb = nb;
|
|
int return_cached = 0;
|
|
|
|
tcache_unsorted_count = 0;
|
|
#endif
|
|
|
|
for (;; )
|
|
{
|
|
int iters = 0;
|
|
while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
|
|
{
|
|
bck = victim->bk;
|
|
size = chunksize (victim);
|
|
mchunkptr next = chunk_at_offset (victim, size);
|
|
|
|
if (__glibc_unlikely (size <= CHUNK_HDR_SZ)
|
|
|| __glibc_unlikely (size > av->system_mem))
|
|
malloc_printerr ("malloc(): invalid size (unsorted)");
|
|
if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)
|
|
|| __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
|
|
malloc_printerr ("malloc(): invalid next size (unsorted)");
|
|
if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
|
|
malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
|
|
if (__glibc_unlikely (bck->fd != victim)
|
|
|| __glibc_unlikely (victim->fd != unsorted_chunks (av)))
|
|
malloc_printerr ("malloc(): unsorted double linked list corrupted");
|
|
if (__glibc_unlikely (prev_inuse (next)))
|
|
malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
|
|
|
|
```
|
|
</details>
|
|
|
|
#### ako `in_smallbin_range`
|
|
|
|
Ako je deo veći od tražene veličine, iskoristite ga i postavite ostatak prostora dela u nesortiranu listu i ažurirajte `last_remainder` sa njim.
|
|
|
|
<details>
|
|
|
|
<summary><code>_int_malloc</code> nesortirana kofa <code>in_smallbin_range</code></summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4090C11-L4124C14
|
|
|
|
/*
|
|
If a small request, try to use last remainder if it is the
|
|
only chunk in unsorted bin. This helps promote locality for
|
|
runs of consecutive small requests. This is the only
|
|
exception to best-fit, and applies only when there is
|
|
no exact fit for a small chunk.
|
|
*/
|
|
|
|
if (in_smallbin_range (nb) &&
|
|
bck == unsorted_chunks (av) &&
|
|
victim == av->last_remainder &&
|
|
(unsigned long) (size) > (unsigned long) (nb + MINSIZE))
|
|
{
|
|
/* split and reattach remainder */
|
|
remainder_size = size - nb;
|
|
remainder = chunk_at_offset (victim, nb);
|
|
unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
|
|
av->last_remainder = remainder;
|
|
remainder->bk = remainder->fd = unsorted_chunks (av);
|
|
if (!in_smallbin_range (remainder_size))
|
|
{
|
|
remainder->fd_nextsize = NULL;
|
|
remainder->bk_nextsize = NULL;
|
|
}
|
|
|
|
set_head (victim, nb | PREV_INUSE |
|
|
(av != &main_arena ? NON_MAIN_ARENA : 0));
|
|
set_head (remainder, remainder_size | PREV_INUSE);
|
|
set_foot (remainder, remainder_size);
|
|
|
|
check_malloced_chunk (av, victim, nb);
|
|
void *p = chunk2mem (victim);
|
|
alloc_perturb (p, bytes);
|
|
return p;
|
|
}
|
|
|
|
```
|
|
</details>
|
|
|
|
Ako je ovo uspešno, vrati deo i to je to, ako ne, nastavi sa izvršavanjem funkcije...
|
|
|
|
#### ako je veličina jednaka
|
|
|
|
Nastavi sa uklanjanjem dela iz bin-a, u slučaju da je tražena veličina tačno veličina dela:
|
|
|
|
- Ako tcache nije popunjen, dodaj ga u tcache i nastavi ukazujući da postoji tcache deo koji bi mogao biti korišćen
|
|
- Ako je tcache pun, jednostavno ga koristi vraćajući ga
|
|
|
|
<details>
|
|
|
|
<summary><code>_int_malloc</code> nesortiran bin jednaka veličina</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4126C11-L4157C14
|
|
|
|
/* remove from unsorted list */
|
|
unsorted_chunks (av)->bk = bck;
|
|
bck->fd = unsorted_chunks (av);
|
|
|
|
/* Take now instead of binning if exact fit */
|
|
|
|
if (size == nb)
|
|
{
|
|
set_inuse_bit_at_offset (victim, size);
|
|
if (av != &main_arena)
|
|
set_non_main_arena (victim);
|
|
#if USE_TCACHE
|
|
/* Fill cache first, return to user only if cache fills.
|
|
We may return one of these chunks later. */
|
|
if (tcache_nb > 0
|
|
&& tcache->counts[tc_idx] < mp_.tcache_count)
|
|
{
|
|
tcache_put (victim, tc_idx);
|
|
return_cached = 1;
|
|
continue;
|
|
}
|
|
else
|
|
{
|
|
#endif
|
|
check_malloced_chunk (av, victim, nb);
|
|
void *p = chunk2mem (victim);
|
|
alloc_perturb (p, bytes);
|
|
return p;
|
|
#if USE_TCACHE
|
|
}
|
|
#endif
|
|
}
|
|
|
|
```
|
|
</details>
|
|
|
|
Ako deo nije vraćen ili dodat u tcache, nastavite sa kodom...
|
|
|
|
#### stavite deo u kantu
|
|
|
|
Skladištite provereni deo u maloj kanti ili u velikoj kanti u zavisnosti od veličine dela (držeći veliku kantu pravilno organizovanom).
|
|
|
|
Vrše se bezbednosne provere kako bi se osiguralo da dvostruko povezane liste velike kante nisu oštećene:
|
|
|
|
- Ako `fwd->bk_nextsize->fd_nextsize != fwd`: `malloc(): largebin double linked list corrupted (nextsize)`
|
|
- Ako `fwd->bk->fd != fwd`: `malloc(): largebin double linked list corrupted (bk)`
|
|
|
|
<details>
|
|
|
|
<summary><code>_int_malloc</code> stavite deo u kantu</summary>
|
|
```c
|
|
/* place chunk in bin */
|
|
|
|
if (in_smallbin_range (size))
|
|
{
|
|
victim_index = smallbin_index (size);
|
|
bck = bin_at (av, victim_index);
|
|
fwd = bck->fd;
|
|
}
|
|
else
|
|
{
|
|
victim_index = largebin_index (size);
|
|
bck = bin_at (av, victim_index);
|
|
fwd = bck->fd;
|
|
|
|
/* maintain large bins in sorted order */
|
|
if (fwd != bck)
|
|
{
|
|
/* Or with inuse bit to speed comparisons */
|
|
size |= PREV_INUSE;
|
|
/* if smaller than smallest, bypass loop below */
|
|
assert (chunk_main_arena (bck->bk));
|
|
if ((unsigned long) (size)
|
|
< (unsigned long) chunksize_nomask (bck->bk))
|
|
{
|
|
fwd = bck;
|
|
bck = bck->bk;
|
|
|
|
victim->fd_nextsize = fwd->fd;
|
|
victim->bk_nextsize = fwd->fd->bk_nextsize;
|
|
fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
|
|
}
|
|
else
|
|
{
|
|
assert (chunk_main_arena (fwd));
|
|
while ((unsigned long) size < chunksize_nomask (fwd))
|
|
{
|
|
fwd = fwd->fd_nextsize;
|
|
assert (chunk_main_arena (fwd));
|
|
}
|
|
|
|
if ((unsigned long) size
|
|
== (unsigned long) chunksize_nomask (fwd))
|
|
/* Always insert in the second position. */
|
|
fwd = fwd->fd;
|
|
else
|
|
{
|
|
victim->fd_nextsize = fwd;
|
|
victim->bk_nextsize = fwd->bk_nextsize;
|
|
if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
|
|
malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
|
|
fwd->bk_nextsize = victim;
|
|
victim->bk_nextsize->fd_nextsize = victim;
|
|
}
|
|
bck = fwd->bk;
|
|
if (bck->fd != fwd)
|
|
malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
|
|
}
|
|
}
|
|
else
|
|
victim->fd_nextsize = victim->bk_nextsize = victim;
|
|
}
|
|
|
|
mark_bin (av, victim_index);
|
|
victim->bk = bck;
|
|
victim->fd = fwd;
|
|
fwd->bk = victim;
|
|
bck->fd = victim;
|
|
```
|
|
</details>
|
|
|
|
#### `_int_malloc` ograničenja
|
|
|
|
U ovom trenutku, ako je neki deo sačuvan u tcache koji se može koristiti i limit je dostignut, samo **vrati tcache deo**.
|
|
|
|
Štaviše, ako je dostignut **MAX_ITERS**, prekinite petlju i dobijte deo na drugačiji način (top deo).
|
|
|
|
Ako je `return_cached` postavljen, samo vratite deo iz tcache da biste izbegli veće pretrage.
|
|
|
|
<details>
|
|
|
|
<summary><code>_int_malloc</code> ograničenja</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4227C1-L4250C7
|
|
|
|
#if USE_TCACHE
|
|
/* If we've processed as many chunks as we're allowed while
|
|
filling the cache, return one of the cached ones. */
|
|
++tcache_unsorted_count;
|
|
if (return_cached
|
|
&& mp_.tcache_unsorted_limit > 0
|
|
&& tcache_unsorted_count > mp_.tcache_unsorted_limit)
|
|
{
|
|
return tcache_get (tc_idx);
|
|
}
|
|
#endif
|
|
|
|
#define MAX_ITERS 10000
|
|
if (++iters >= MAX_ITERS)
|
|
break;
|
|
}
|
|
|
|
#if USE_TCACHE
|
|
/* If all the small chunks we found ended up cached, return one now. */
|
|
if (return_cached)
|
|
{
|
|
return tcache_get (tc_idx);
|
|
}
|
|
#endif
|
|
```
|
|
</details>
|
|
|
|
Ako granice nisu dostignute, nastavite sa kodom...
|
|
|
|
### Velika kesa (po indeksu)
|
|
|
|
Ako je zahtev velik (nije u maloj kesi) i još nismo vratili nijedan deo, uzmite **indeks** tražene veličine u **velikoj kesi**, proverite da li je **prazna** ili ako je **najveći deo u ovoj kesi veći** od tražene veličine i u tom slučaju pronađite **najmanji deo koji se može koristiti** za traženu veličinu.
|
|
|
|
Ako prostor koji ostaje od konačno korišćenog dela može biti novi deo, dodajte ga u neuređenu kesu i `last_remainder` se ažurira.
|
|
|
|
Bezbednosna provera se vrši prilikom dodavanja ostatka u neuređenu kesu:
|
|
|
|
- `bck->fd->bk != bck`: `malloc(): corrupted unsorted chunks`
|
|
|
|
<details>
|
|
|
|
<summary><code>_int_malloc</code> Velika kesa (po indeksu)</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4252C7-L4317C10
|
|
|
|
/*
|
|
If a large request, scan through the chunks of current bin in
|
|
sorted order to find smallest that fits. Use the skip list for this.
|
|
*/
|
|
|
|
if (!in_smallbin_range (nb))
|
|
{
|
|
bin = bin_at (av, idx);
|
|
|
|
/* skip scan if empty or largest chunk is too small */
|
|
if ((victim = first (bin)) != bin
|
|
&& (unsigned long) chunksize_nomask (victim)
|
|
>= (unsigned long) (nb))
|
|
{
|
|
victim = victim->bk_nextsize;
|
|
while (((unsigned long) (size = chunksize (victim)) <
|
|
(unsigned long) (nb)))
|
|
victim = victim->bk_nextsize;
|
|
|
|
/* Avoid removing the first entry for a size so that the skip
|
|
list does not have to be rerouted. */
|
|
if (victim != last (bin)
|
|
&& chunksize_nomask (victim)
|
|
== chunksize_nomask (victim->fd))
|
|
victim = victim->fd;
|
|
|
|
remainder_size = size - nb;
|
|
unlink_chunk (av, victim);
|
|
|
|
/* Exhaust */
|
|
if (remainder_size < MINSIZE)
|
|
{
|
|
set_inuse_bit_at_offset (victim, size);
|
|
if (av != &main_arena)
|
|
set_non_main_arena (victim);
|
|
}
|
|
/* Split */
|
|
else
|
|
{
|
|
remainder = chunk_at_offset (victim, nb);
|
|
/* We cannot assume the unsorted list is empty and therefore
|
|
have to perform a complete insert here. */
|
|
bck = unsorted_chunks (av);
|
|
fwd = bck->fd;
|
|
if (__glibc_unlikely (fwd->bk != bck))
|
|
malloc_printerr ("malloc(): corrupted unsorted chunks");
|
|
remainder->bk = bck;
|
|
remainder->fd = fwd;
|
|
bck->fd = remainder;
|
|
fwd->bk = remainder;
|
|
if (!in_smallbin_range (remainder_size))
|
|
{
|
|
remainder->fd_nextsize = NULL;
|
|
remainder->bk_nextsize = NULL;
|
|
}
|
|
set_head (victim, nb | PREV_INUSE |
|
|
(av != &main_arena ? NON_MAIN_ARENA : 0));
|
|
set_head (remainder, remainder_size | PREV_INUSE);
|
|
set_foot (remainder, remainder_size);
|
|
}
|
|
check_malloced_chunk (av, victim, nb);
|
|
void *p = chunk2mem (victim);
|
|
alloc_perturb (p, bytes);
|
|
return p;
|
|
}
|
|
}
|
|
```
|
|
</details>
|
|
|
|
Ako se deo ne pronađe kao pogodan za ovo, nastavite
|
|
|
|
### Velika kesa (sledeća veća)
|
|
|
|
Ako u tačnoj velikoj kesi nije bilo nijednog dela koji bi mogao da se koristi, počnite da prolazite kroz sve sledeće velike kese (počinjajući od odmah veće) dok se ne pronađe jedan (ako ih ima).
|
|
|
|
Ostatak podeljenog dela se dodaje u nesortiranu kesu, `last_remainder` se ažurira i vrši se ista provera bezbednosti:
|
|
|
|
- `bck->fd->bk != bck`: `malloc(): corrupted unsorted chunks 2`
|
|
|
|
<details>
|
|
|
|
<summary><code>_int_malloc</code> Velika kesa (sledeća veća)</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4319C7-L4425C10
|
|
|
|
/*
|
|
Search for a chunk by scanning bins, starting with next largest
|
|
bin. This search is strictly by best-fit; i.e., the smallest
|
|
(with ties going to approximately the least recently used) chunk
|
|
that fits is selected.
|
|
|
|
The bitmap avoids needing to check that most blocks are nonempty.
|
|
The particular case of skipping all bins during warm-up phases
|
|
when no chunks have been returned yet is faster than it might look.
|
|
*/
|
|
|
|
++idx;
|
|
bin = bin_at (av, idx);
|
|
block = idx2block (idx);
|
|
map = av->binmap[block];
|
|
bit = idx2bit (idx);
|
|
|
|
for (;; )
|
|
{
|
|
/* Skip rest of block if there are no more set bits in this block. */
|
|
if (bit > map || bit == 0)
|
|
{
|
|
do
|
|
{
|
|
if (++block >= BINMAPSIZE) /* out of bins */
|
|
goto use_top;
|
|
}
|
|
while ((map = av->binmap[block]) == 0);
|
|
|
|
bin = bin_at (av, (block << BINMAPSHIFT));
|
|
bit = 1;
|
|
}
|
|
|
|
/* Advance to bin with set bit. There must be one. */
|
|
while ((bit & map) == 0)
|
|
{
|
|
bin = next_bin (bin);
|
|
bit <<= 1;
|
|
assert (bit != 0);
|
|
}
|
|
|
|
/* Inspect the bin. It is likely to be non-empty */
|
|
victim = last (bin);
|
|
|
|
/* If a false alarm (empty bin), clear the bit. */
|
|
if (victim == bin)
|
|
{
|
|
av->binmap[block] = map &= ~bit; /* Write through */
|
|
bin = next_bin (bin);
|
|
bit <<= 1;
|
|
}
|
|
|
|
else
|
|
{
|
|
size = chunksize (victim);
|
|
|
|
/* We know the first chunk in this bin is big enough to use. */
|
|
assert ((unsigned long) (size) >= (unsigned long) (nb));
|
|
|
|
remainder_size = size - nb;
|
|
|
|
/* unlink */
|
|
unlink_chunk (av, victim);
|
|
|
|
/* Exhaust */
|
|
if (remainder_size < MINSIZE)
|
|
{
|
|
set_inuse_bit_at_offset (victim, size);
|
|
if (av != &main_arena)
|
|
set_non_main_arena (victim);
|
|
}
|
|
|
|
/* Split */
|
|
else
|
|
{
|
|
remainder = chunk_at_offset (victim, nb);
|
|
|
|
/* We cannot assume the unsorted list is empty and therefore
|
|
have to perform a complete insert here. */
|
|
bck = unsorted_chunks (av);
|
|
fwd = bck->fd;
|
|
if (__glibc_unlikely (fwd->bk != bck))
|
|
malloc_printerr ("malloc(): corrupted unsorted chunks 2");
|
|
remainder->bk = bck;
|
|
remainder->fd = fwd;
|
|
bck->fd = remainder;
|
|
fwd->bk = remainder;
|
|
|
|
/* advertise as last remainder */
|
|
if (in_smallbin_range (nb))
|
|
av->last_remainder = remainder;
|
|
if (!in_smallbin_range (remainder_size))
|
|
{
|
|
remainder->fd_nextsize = NULL;
|
|
remainder->bk_nextsize = NULL;
|
|
}
|
|
set_head (victim, nb | PREV_INUSE |
|
|
(av != &main_arena ? NON_MAIN_ARENA : 0));
|
|
set_head (remainder, remainder_size | PREV_INUSE);
|
|
set_foot (remainder, remainder_size);
|
|
}
|
|
check_malloced_chunk (av, victim, nb);
|
|
void *p = chunk2mem (victim);
|
|
alloc_perturb (p, bytes);
|
|
return p;
|
|
}
|
|
}
|
|
```
|
|
</details>
|
|
|
|
### Top Chunk
|
|
|
|
U ovom trenutku, vreme je da se dobije novi chunk iz Top chunk-a (ako je dovoljno velik).
|
|
|
|
Počinje sa bezbednosnom proverom kako bi se osiguralo da veličina chunk-a nije prevelika (korumpirana):
|
|
|
|
- `chunksize(av->top) > av->system_mem`: `malloc(): corrupted top size`
|
|
|
|
Zatim će koristiti prostor top chunk-a ako je dovoljno velik da kreira chunk tražene veličine.\
|
|
Ako nije, ako postoje brzi chunk-ovi, konsolidujte ih i pokušajte ponovo.\
|
|
Na kraju, ako nema dovoljno prostora, koristite `sysmalloc` da alocirate dovoljnu veličinu.
|
|
|
|
<details>
|
|
|
|
<summary><code>_int_malloc</code> Top chunk</summary>
|
|
```c
|
|
use_top:
|
|
/*
|
|
If large enough, split off the chunk bordering the end of memory
|
|
(held in av->top). Note that this is in accord with the best-fit
|
|
search rule. In effect, av->top is treated as larger (and thus
|
|
less well fitting) than any other available chunk since it can
|
|
be extended to be as large as necessary (up to system
|
|
limitations).
|
|
|
|
We require that av->top always exists (i.e., has size >=
|
|
MINSIZE) after initialization, so if it would otherwise be
|
|
exhausted by current request, it is replenished. (The main
|
|
reason for ensuring it exists is that we may need MINSIZE space
|
|
to put in fenceposts in sysmalloc.)
|
|
*/
|
|
|
|
victim = av->top;
|
|
size = chunksize (victim);
|
|
|
|
if (__glibc_unlikely (size > av->system_mem))
|
|
malloc_printerr ("malloc(): corrupted top size");
|
|
|
|
if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
|
|
{
|
|
remainder_size = size - nb;
|
|
remainder = chunk_at_offset (victim, nb);
|
|
av->top = remainder;
|
|
set_head (victim, nb | PREV_INUSE |
|
|
(av != &main_arena ? NON_MAIN_ARENA : 0));
|
|
set_head (remainder, remainder_size | PREV_INUSE);
|
|
|
|
check_malloced_chunk (av, victim, nb);
|
|
void *p = chunk2mem (victim);
|
|
alloc_perturb (p, bytes);
|
|
return p;
|
|
}
|
|
|
|
/* When we are using atomic ops to free fast chunks we can get
|
|
here for all block sizes. */
|
|
else if (atomic_load_relaxed (&av->have_fastchunks))
|
|
{
|
|
malloc_consolidate (av);
|
|
/* restore original bin index */
|
|
if (in_smallbin_range (nb))
|
|
idx = smallbin_index (nb);
|
|
else
|
|
idx = largebin_index (nb);
|
|
}
|
|
|
|
/*
|
|
Otherwise, relay to handle system-dependent cases
|
|
*/
|
|
else
|
|
{
|
|
void *p = sysmalloc (nb, av);
|
|
if (p != NULL)
|
|
alloc_perturb (p, bytes);
|
|
return p;
|
|
}
|
|
}
|
|
}
|
|
|
|
```
|
|
</details>
|
|
|
|
## sysmalloc
|
|
|
|
### sysmalloc start
|
|
|
|
Ako je arena null ili je tražena veličina prevelika (i preostali mmaps su dozvoljeni) koristi `sysmalloc_mmap` za alokaciju prostora i vraćanje istog.
|
|
|
|
<details>
|
|
|
|
<summary>sysmalloc start</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2531
|
|
|
|
/*
|
|
sysmalloc handles malloc cases requiring more memory from the system.
|
|
On entry, it is assumed that av->top does not have enough
|
|
space to service request for nb bytes, thus requiring that av->top
|
|
be extended or replaced.
|
|
*/
|
|
|
|
static void *
|
|
sysmalloc (INTERNAL_SIZE_T nb, mstate av)
|
|
{
|
|
mchunkptr old_top; /* incoming value of av->top */
|
|
INTERNAL_SIZE_T old_size; /* its size */
|
|
char *old_end; /* its end address */
|
|
|
|
long size; /* arg to first MORECORE or mmap call */
|
|
char *brk; /* return value from MORECORE */
|
|
|
|
long correction; /* arg to 2nd MORECORE call */
|
|
char *snd_brk; /* 2nd return val */
|
|
|
|
INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
|
|
INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
|
|
char *aligned_brk; /* aligned offset into brk */
|
|
|
|
mchunkptr p; /* the allocated/returned chunk */
|
|
mchunkptr remainder; /* remainder from allocation */
|
|
unsigned long remainder_size; /* its size */
|
|
|
|
|
|
size_t pagesize = GLRO (dl_pagesize);
|
|
bool tried_mmap = false;
|
|
|
|
|
|
/*
|
|
If have mmap, and the request size meets the mmap threshold, and
|
|
the system supports mmap, and there are few enough currently
|
|
allocated mmapped regions, try to directly map this request
|
|
rather than expanding top.
|
|
*/
|
|
|
|
if (av == NULL
|
|
|| ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
|
|
&& (mp_.n_mmaps < mp_.n_mmaps_max)))
|
|
{
|
|
char *mm;
|
|
if (mp_.hp_pagesize > 0 && nb >= mp_.hp_pagesize)
|
|
{
|
|
/* There is no need to issue the THP madvise call if Huge Pages are
|
|
used directly. */
|
|
mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av);
|
|
if (mm != MAP_FAILED)
|
|
return mm;
|
|
}
|
|
mm = sysmalloc_mmap (nb, pagesize, 0, av);
|
|
if (mm != MAP_FAILED)
|
|
return mm;
|
|
tried_mmap = true;
|
|
}
|
|
|
|
/* There are no usable arenas and mmap also failed. */
|
|
if (av == NULL)
|
|
return 0;
|
|
```
|
|
</details>
|
|
|
|
### sysmalloc provere
|
|
|
|
Započinje dobijanjem informacija o starom top chunk-u i proverava da li su neki od sledećih uslova tačni:
|
|
|
|
- Stara veličina heap-a je 0 (novi heap)
|
|
- Veličina prethodnog heap-a je veća od MINSIZE i stari Top je u upotrebi
|
|
- Heap je poravnat na veličinu stranice (0x1000, tako da donjih 12 bita treba da budu 0)
|
|
|
|
Takođe proverava da li:
|
|
|
|
- Stara veličina nema dovoljno prostora za kreiranje chunk-a za traženu veličinu
|
|
|
|
<details>
|
|
|
|
<summary>sysmalloc provere</summary>
|
|
```c
|
|
/* Record incoming configuration of top */
|
|
|
|
old_top = av->top;
|
|
old_size = chunksize (old_top);
|
|
old_end = (char *) (chunk_at_offset (old_top, old_size));
|
|
|
|
brk = snd_brk = (char *) (MORECORE_FAILURE);
|
|
|
|
/*
|
|
If not the first time through, we require old_size to be
|
|
at least MINSIZE and to have prev_inuse set.
|
|
*/
|
|
|
|
assert ((old_top == initial_top (av) && old_size == 0) ||
|
|
((unsigned long) (old_size) >= MINSIZE &&
|
|
prev_inuse (old_top) &&
|
|
((unsigned long) old_end & (pagesize - 1)) == 0));
|
|
|
|
/* Precondition: not enough current space to satisfy nb request */
|
|
assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
|
|
```
|
|
</details>
|
|
|
|
### sysmalloc ne glavna arena
|
|
|
|
Prvo će pokušati da **proširi** prethodni heap za ovaj heap. Ako to nije moguće, pokušaće da **alokira novi heap** i ažurira pokazivače kako bi mogli da ga koriste.\
|
|
Na kraju, ako to nije uspelo, pokušaće da pozove **`sysmalloc_mmap`**. 
|
|
|
|
<details>
|
|
|
|
<summary>sysmalloc ne glavna arena</summary>
|
|
```c
|
|
if (av != &main_arena)
|
|
{
|
|
heap_info *old_heap, *heap;
|
|
size_t old_heap_size;
|
|
|
|
/* First try to extend the current heap. */
|
|
old_heap = heap_for_ptr (old_top);
|
|
old_heap_size = old_heap->size;
|
|
if ((long) (MINSIZE + nb - old_size) > 0
|
|
&& grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
|
|
{
|
|
av->system_mem += old_heap->size - old_heap_size;
|
|
set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
|
|
| PREV_INUSE);
|
|
}
|
|
else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
|
|
{
|
|
/* Use a newly allocated heap. */
|
|
heap->ar_ptr = av;
|
|
heap->prev = old_heap;
|
|
av->system_mem += heap->size;
|
|
/* Set up the new top. */
|
|
top (av) = chunk_at_offset (heap, sizeof (*heap));
|
|
set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
|
|
|
|
/* Setup fencepost and free the old top chunk with a multiple of
|
|
MALLOC_ALIGNMENT in size. */
|
|
/* The fencepost takes at least MINSIZE bytes, because it might
|
|
become the top chunk again later. Note that a footer is set
|
|
up, too, although the chunk is marked in use. */
|
|
old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
|
|
set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ),
|
|
0 | PREV_INUSE);
|
|
if (old_size >= MINSIZE)
|
|
{
|
|
set_head (chunk_at_offset (old_top, old_size),
|
|
CHUNK_HDR_SZ | PREV_INUSE);
|
|
set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ);
|
|
set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
|
|
_int_free (av, old_top, 1);
|
|
}
|
|
else
|
|
{
|
|
set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE);
|
|
set_foot (old_top, (old_size + CHUNK_HDR_SZ));
|
|
}
|
|
}
|
|
else if (!tried_mmap)
|
|
{
|
|
/* We can at least try to use to mmap memory. If new_heap fails
|
|
it is unlikely that trying to allocate huge pages will
|
|
succeed. */
|
|
char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
|
|
if (mm != MAP_FAILED)
|
|
return mm;
|
|
}
|
|
}
|
|
```
|
|
</details>
|
|
|
|
### sysmalloc glavna arena
|
|
|
|
Počinje da izračunava količinu potrebne memorije. Počeće tako što će zatražiti kontinualnu (contiguous) memoriju, tako da će u ovom slučaju biti moguće koristiti staru neiskorišćenu memoriju. Takođe se vrše neke operacije poravnanja.
|
|
|
|
<details>
|
|
|
|
<summary>sysmalloc glavna arena</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2665C1-L2713C10
|
|
|
|
else /* av == main_arena */
|
|
|
|
|
|
{ /* Request enough space for nb + pad + overhead */
|
|
size = nb + mp_.top_pad + MINSIZE;
|
|
|
|
/*
|
|
If contiguous, we can subtract out existing space that we hope to
|
|
combine with new space. We add it back later only if
|
|
we don't actually get contiguous space.
|
|
*/
|
|
|
|
if (contiguous (av))
|
|
size -= old_size;
|
|
|
|
/*
|
|
Round to a multiple of page size or huge page size.
|
|
If MORECORE is not contiguous, this ensures that we only call it
|
|
with whole-page arguments. And if MORECORE is contiguous and
|
|
this is not first time through, this preserves page-alignment of
|
|
previous calls. Otherwise, we correct to page-align below.
|
|
*/
|
|
|
|
#ifdef MADV_HUGEPAGE
|
|
/* Defined in brk.c. */
|
|
extern void *__curbrk;
|
|
if (__glibc_unlikely (mp_.thp_pagesize != 0))
|
|
{
|
|
uintptr_t top = ALIGN_UP ((uintptr_t) __curbrk + size,
|
|
mp_.thp_pagesize);
|
|
size = top - (uintptr_t) __curbrk;
|
|
}
|
|
else
|
|
#endif
|
|
size = ALIGN_UP (size, GLRO(dl_pagesize));
|
|
|
|
/*
|
|
Don't try to call MORECORE if argument is so big as to appear
|
|
negative. Note that since mmap takes size_t arg, it may succeed
|
|
below even if we cannot call MORECORE.
|
|
*/
|
|
|
|
if (size > 0)
|
|
{
|
|
brk = (char *) (MORECORE (size));
|
|
if (brk != (char *) (MORECORE_FAILURE))
|
|
madvise_thp (brk, size);
|
|
LIBC_PROBE (memory_sbrk_more, 2, brk, size);
|
|
}
|
|
```
|
|
</details>
|
|
|
|
### sysmalloc glavna arena prethodna greška 1
|
|
|
|
Ako je prethodno vraćeno `MORECORE_FAILURE`, pokušajte ponovo da alocirate memoriju koristeći `sysmalloc_mmap_fallback`
|
|
|
|
<details>
|
|
|
|
<summary><code>sysmalloc</code> glavna arena prethodna greška 1</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2715C7-L2740C10
|
|
|
|
if (brk == (char *) (MORECORE_FAILURE))
|
|
{
|
|
/*
|
|
If have mmap, try using it as a backup when MORECORE fails or
|
|
cannot be used. This is worth doing on systems that have "holes" in
|
|
address space, so sbrk cannot extend to give contiguous space, but
|
|
space is available elsewhere. Note that we ignore mmap max count
|
|
and threshold limits, since the space will not be used as a
|
|
segregated mmap region.
|
|
*/
|
|
|
|
char *mbrk = MAP_FAILED;
|
|
if (mp_.hp_pagesize > 0)
|
|
mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
|
|
mp_.hp_pagesize, mp_.hp_pagesize,
|
|
mp_.hp_flags, av);
|
|
if (mbrk == MAP_FAILED)
|
|
mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, MMAP_AS_MORECORE_SIZE,
|
|
pagesize, 0, av);
|
|
if (mbrk != MAP_FAILED)
|
|
{
|
|
/* We do not need, and cannot use, another sbrk call to find end */
|
|
brk = mbrk;
|
|
snd_brk = brk + size;
|
|
}
|
|
}
|
|
```
|
|
</details>
|
|
|
|
### sysmalloc glavna arena nastavak
|
|
|
|
Ako prethodno nije vratilo `MORECORE_FAILURE`, ako je uspelo, kreirajte neka poravnanja:
|
|
|
|
<details>
|
|
|
|
<summary>sysmalloc glavna arena prethodna greška 2</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2742
|
|
|
|
if (brk != (char *) (MORECORE_FAILURE))
|
|
{
|
|
if (mp_.sbrk_base == 0)
|
|
mp_.sbrk_base = brk;
|
|
av->system_mem += size;
|
|
|
|
/*
|
|
If MORECORE extends previous space, we can likewise extend top size.
|
|
*/
|
|
|
|
if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
|
|
set_head (old_top, (size + old_size) | PREV_INUSE);
|
|
|
|
else if (contiguous (av) && old_size && brk < old_end)
|
|
/* Oops! Someone else killed our space.. Can't touch anything. */
|
|
malloc_printerr ("break adjusted to free malloc space");
|
|
|
|
/*
|
|
Otherwise, make adjustments:
|
|
|
|
* If the first time through or noncontiguous, we need to call sbrk
|
|
just to find out where the end of memory lies.
|
|
|
|
* We need to ensure that all returned chunks from malloc will meet
|
|
MALLOC_ALIGNMENT
|
|
|
|
* If there was an intervening foreign sbrk, we need to adjust sbrk
|
|
request size to account for fact that we will not be able to
|
|
combine new space with existing space in old_top.
|
|
|
|
* Almost all systems internally allocate whole pages at a time, in
|
|
which case we might as well use the whole last page of request.
|
|
So we allocate enough more memory to hit a page boundary now,
|
|
which in turn causes future contiguous calls to page-align.
|
|
*/
|
|
|
|
else
|
|
{
|
|
front_misalign = 0;
|
|
end_misalign = 0;
|
|
correction = 0;
|
|
aligned_brk = brk;
|
|
|
|
/* handle contiguous cases */
|
|
if (contiguous (av))
|
|
{
|
|
/* Count foreign sbrk as system_mem. */
|
|
if (old_size)
|
|
av->system_mem += brk - old_end;
|
|
|
|
/* Guarantee alignment of first new chunk made from this space */
|
|
|
|
front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
|
|
if (front_misalign > 0)
|
|
{
|
|
/*
|
|
Skip over some bytes to arrive at an aligned position.
|
|
We don't need to specially mark these wasted front bytes.
|
|
They will never be accessed anyway because
|
|
prev_inuse of av->top (and any chunk created from its start)
|
|
is always true after initialization.
|
|
*/
|
|
|
|
correction = MALLOC_ALIGNMENT - front_misalign;
|
|
aligned_brk += correction;
|
|
}
|
|
|
|
/*
|
|
If this isn't adjacent to existing space, then we will not
|
|
be able to merge with old_top space, so must add to 2nd request.
|
|
*/
|
|
|
|
correction += old_size;
|
|
|
|
/* Extend the end address to hit a page boundary */
|
|
end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
|
|
correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
|
|
|
|
assert (correction >= 0);
|
|
snd_brk = (char *) (MORECORE (correction));
|
|
|
|
/*
|
|
If can't allocate correction, try to at least find out current
|
|
brk. It might be enough to proceed without failing.
|
|
|
|
Note that if second sbrk did NOT fail, we assume that space
|
|
is contiguous with first sbrk. This is a safe assumption unless
|
|
program is multithreaded but doesn't use locks and a foreign sbrk
|
|
occurred between our first and second calls.
|
|
*/
|
|
|
|
if (snd_brk == (char *) (MORECORE_FAILURE))
|
|
{
|
|
correction = 0;
|
|
snd_brk = (char *) (MORECORE (0));
|
|
}
|
|
else
|
|
madvise_thp (snd_brk, correction);
|
|
}
|
|
|
|
/* handle non-contiguous cases */
|
|
else
|
|
{
|
|
if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
|
|
/* MORECORE/mmap must correctly align */
|
|
assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
|
|
else
|
|
{
|
|
front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
|
|
if (front_misalign > 0)
|
|
{
|
|
/*
|
|
Skip over some bytes to arrive at an aligned position.
|
|
We don't need to specially mark these wasted front bytes.
|
|
They will never be accessed anyway because
|
|
prev_inuse of av->top (and any chunk created from its start)
|
|
is always true after initialization.
|
|
*/
|
|
|
|
aligned_brk += MALLOC_ALIGNMENT - front_misalign;
|
|
}
|
|
}
|
|
|
|
/* Find out current end of memory */
|
|
if (snd_brk == (char *) (MORECORE_FAILURE))
|
|
{
|
|
snd_brk = (char *) (MORECORE (0));
|
|
}
|
|
}
|
|
|
|
/* Adjust top based on results of second sbrk */
|
|
if (snd_brk != (char *) (MORECORE_FAILURE))
|
|
{
|
|
av->top = (mchunkptr) aligned_brk;
|
|
set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
|
|
av->system_mem += correction;
|
|
|
|
/*
|
|
If not the first time through, we either have a
|
|
gap due to foreign sbrk or a non-contiguous region. Insert a
|
|
double fencepost at old_top to prevent consolidation with space
|
|
we don't own. These fenceposts are artificial chunks that are
|
|
marked as inuse and are in any case too small to use. We need
|
|
two to make sizes and alignments work out.
|
|
*/
|
|
|
|
if (old_size != 0)
|
|
{
|
|
/*
|
|
Shrink old_top to insert fenceposts, keeping size a
|
|
multiple of MALLOC_ALIGNMENT. We know there is at least
|
|
enough space in old_top to do this.
|
|
*/
|
|
old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK;
|
|
set_head (old_top, old_size | PREV_INUSE);
|
|
|
|
/*
|
|
Note that the following assignments completely overwrite
|
|
old_top when old_size was previously MINSIZE. This is
|
|
intentional. We need the fencepost, even if old_top otherwise gets
|
|
lost.
|
|
*/
|
|
set_head (chunk_at_offset (old_top, old_size),
|
|
CHUNK_HDR_SZ | PREV_INUSE);
|
|
set_head (chunk_at_offset (old_top,
|
|
old_size + CHUNK_HDR_SZ),
|
|
CHUNK_HDR_SZ | PREV_INUSE);
|
|
|
|
/* If possible, release the rest. */
|
|
if (old_size >= MINSIZE)
|
|
{
|
|
_int_free (av, old_top, 1);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
} /* if (av != &main_arena) */
|
|
```
|
|
</details>
|
|
|
|
### sysmalloc finale
|
|
|
|
Završite alokaciju ažuriranjem informacija o areni.
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2921C3-L2943C12
|
|
|
|
if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
|
|
av->max_system_mem = av->system_mem;
|
|
check_malloc_state (av);
|
|
|
|
/* finally, do the allocation */
|
|
p = av->top;
|
|
size = chunksize (p);
|
|
|
|
/* check that one of the above allocation paths succeeded */
|
|
if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
|
|
{
|
|
remainder_size = size - nb;
|
|
remainder = chunk_at_offset (p, nb);
|
|
av->top = remainder;
|
|
set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
|
|
set_head (remainder, remainder_size | PREV_INUSE);
|
|
check_malloced_chunk (av, p, nb);
|
|
return chunk2mem (p);
|
|
}
|
|
|
|
/* catch all failure paths */
|
|
__set_errno (ENOMEM);
|
|
return 0;
|
|
```
|
|
## sysmalloc_mmap
|
|
|
|
<details>
|
|
|
|
<summary>sysmalloc_mmap kod</summary>
|
|
```c
|
|
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2392C1-L2481C2
|
|
|
|
static void *
|
|
sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
|
|
{
|
|
long int size;
|
|
|
|
/*
|
|
Round up size to nearest page. For mmapped chunks, the overhead is one
|
|
SIZE_SZ unit larger than for normal chunks, because there is no
|
|
following chunk whose prev_size field could be used.
|
|
|
|
See the front_misalign handling below, for glibc there is no need for
|
|
further alignments unless we have have high alignment.
|
|
*/
|
|
if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
|
|
size = ALIGN_UP (nb + SIZE_SZ, pagesize);
|
|
else
|
|
size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
|
|
|
|
/* Don't try if size wraps around 0. */
|
|
if ((unsigned long) (size) <= (unsigned long) (nb))
|
|
return MAP_FAILED;
|
|
|
|
char *mm = (char *) MMAP (0, size,
|
|
mtag_mmap_flags | PROT_READ | PROT_WRITE,
|
|
extra_flags);
|
|
if (mm == MAP_FAILED)
|
|
return mm;
|
|
|
|
#ifdef MAP_HUGETLB
|
|
if (!(extra_flags & MAP_HUGETLB))
|
|
madvise_thp (mm, size);
|
|
#endif
|
|
|
|
__set_vma_name (mm, size, " glibc: malloc");
|
|
|
|
/*
|
|
The offset to the start of the mmapped region is stored in the prev_size
|
|
field of the chunk. This allows us to adjust returned start address to
|
|
meet alignment requirements here and in memalign(), and still be able to
|
|
compute proper address argument for later munmap in free() and realloc().
|
|
*/
|
|
|
|
INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
|
|
|
|
if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
|
|
{
|
|
/* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and
|
|
MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1. Each mmap'ed area is page
|
|
aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
|
|
assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
|
|
front_misalign = 0;
|
|
}
|
|
else
|
|
front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
|
|
|
|
mchunkptr p; /* the allocated/returned chunk */
|
|
|
|
if (front_misalign > 0)
|
|
{
|
|
ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign;
|
|
p = (mchunkptr) (mm + correction);
|
|
set_prev_size (p, correction);
|
|
set_head (p, (size - correction) | IS_MMAPPED);
|
|
}
|
|
else
|
|
{
|
|
p = (mchunkptr) mm;
|
|
set_prev_size (p, 0);
|
|
set_head (p, size | IS_MMAPPED);
|
|
}
|
|
|
|
/* update statistics */
|
|
int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1;
|
|
atomic_max (&mp_.max_n_mmaps, new);
|
|
|
|
unsigned long sum;
|
|
sum = atomic_fetch_add_relaxed (&mp_.mmapped_mem, size) + size;
|
|
atomic_max (&mp_.max_mmapped_mem, sum);
|
|
|
|
check_chunk (av, p);
|
|
|
|
return chunk2mem (p);
|
|
}
|
|
```
|
|
</details>
|
|
|
|
{{#include ../../../banners/hacktricks-training.md}}
|