System Hacking
[Heap Exploitation] malloc & free (glibc 2.23)
dokydoky
2017. 8. 9. 15:53
Reference
This is a condensed version of dhavalkapil's writeup:
https://heap-exploitation.dhavalkapil.com/diving_into_glibc_heap/core_functions.html
glibc 2.23 code
http://repo.or.cz/glibc.git/commit/ab30899d880f9741a409cbc0d7a28399bdac21bf
Malloc
Malloc pseudo-code
// step 1 : check fastbins and smallbins
if ) size == fastbin range
    'return chunk' = chunk at the head of the fastbin list
    if ) return chunk == NULL
        move on to the 'smallbin case'
    else ) check 'the size of the chunk' == 'requested size'
        ( "malloc(): memory corruption (fast)" )
if ) size == smallbin range
    if ) a chunk of the requested size doesn't exist in the smallbin
        call 'malloc_consolidate' and skip to the other bins
    else ) check victim->bk->fd == victim
        ( "malloc(): smallbin double linked list corrupted" )
        set the PREV_INUSE bit of the next chunk
if ) size != smallbin range
    if ) av has fastchunks
        call 'malloc_consolidate'

// step 2 : check unsorted chunks
victim : one of the chunks in the unsorted bin
while ( at most MAX_ITERS ( 10000 ) iterations OR until the unsorted bin is exhausted ) {
    if ) !( size >= minimum ( 2 * SIZE_SZ ) AND size <= maximum ( av->system_mem ) )
        throw an error ( "malloc(): memory corruption" )
    if ) ( the requested size falls in smallbin range ) AND ( victim is the last remainder chunk )
         AND ( it is the only chunk in the unsorted bin ) AND ( the chunk's size >= the one requested )
        split it into 2 chunks : one to return, the other becomes the new last remainder chunk
    else )
        remove the chunk from the unsorted bin
        add it to a smallbin or largebin depending on its size
}

// step 3 : check largebins
split the chunk into two chunks ( 'return chunk' and 'unsorted bin chunk' )
if ) !( unsorted_chunks(av)->fd->bk == unsorted_chunks(av) )
    ( "malloc(): corrupted unsorted chunks" )
otherwise, the entire victim chunk is returned

// step 4 : check a single bin ( fast or small )
split it into 2 chunks ( 'return chunk' and 'unsorted bin chunk' )
if ) !( unsorted_chunks(av)->fd->bk == unsorted_chunks(av) )
    ( "malloc(): corrupted unsorted chunks 2" )

// step 5 : top chunk
split it into 2 chunks ( 'return chunk' and 'top chunk' )
if ) av has fastchunks
    call 'malloc_consolidate' and return to the 'check unsorted chunks' step
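To see step 1 in action, here is a tiny test program. It assumes glibc 2.23 on 64-bit (no tcache, so fastbin-sized frees go straight to the fastbins); the request sizes are just examples. Fastbins are LIFO, so the chunk freed last comes back first:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *a = malloc(0x20);       /* fastbin-sized requests */
    void *b = malloc(0x20);

    free(a);                      /* fastbin: a */
    free(b);                      /* fastbin: b -> a (LIFO) */

    /* step 1 pops from the head of the list, so b comes back first */
    printf("%p\n", malloc(0x20)); /* b */
    printf("%p\n", malloc(0x20)); /* a */
    return 0;
}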
Exception codes in Malloc
fastbin check
/* _int_malloc, fastbin path (malloc.c:3381-3394) */
if (victim != 0)
  {
    if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
      {
        errstr = "malloc(): memory corruption (fast)";
      errout:
        malloc_printerr (check_action, errstr, chunk2mem (victim), av);
        return NULL;
      }
    check_remalloced_chunk (av, victim, nb);
    void *p = chunk2mem (victim);
    alloc_perturb (p, bytes);
    return p;
  }
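A minimal sketch of how this check fires, assuming glibc 2.23 on 64-bit; the direct write below is a stand-in for a real overflow primitive:

#include <stdlib.h>
#include <stdint.h>

int main(void)
{
    char *p = malloc(0x20);        /* 0x30 chunk, fastbin range */
    free(p);                       /* p sits at the head of the 0x30 fastbin */

    /* stand-in for an overflow primitive: overwrite the freed chunk's
       size field (at mem-8 on 64-bit) with a size from another bin */
    *(size_t *)(p - 8) = 0x50;

    /* fastbin_index(0x50) != idx of the 0x30 bin -> abort with
       "malloc(): memory corruption (fast)" */
    malloc(0x20);
    return 0;
}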
smallbin check
/* _int_malloc, smallbin path (malloc.c:3416-3424) */
bck = victim->bk;
if (__glibc_unlikely (bck->fd != victim))
  {
    errstr = "malloc(): smallbin double linked list corrupted";
    goto errout;
  }
set_inuse_bit_at_offset (victim, nb);
bin->bk = bck;
bck->fd = bin;
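A sketch of tripping this check, again assuming glibc 2.23 on 64-bit with a direct write standing in for a corruption primitive. The chunk first has to travel through the unsorted bin into its smallbin before the check applies:

#include <stdlib.h>

int main(void)
{
    /* 0x100 request -> 0x110 chunk, smallbin range (fastbins end at 0x80) */
    char *a = malloc(0x100);
    malloc(0x10);                 /* guard: keeps a from merging into top */

    free(a);                      /* a goes to the unsorted bin */
    malloc(0x400);                /* sorting pass: a moves into its smallbin */

    /* a's bk field lives at mem+8; pointing it at the chunk itself makes
       bck->fd a bin pointer instead of the victim, so the check fires */
    *(void **)(a + 8) = (void *)(a - 0x10);

    /* "malloc(): smallbin double linked list corrupted" */
    malloc(0x100);
    return 0;
}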
Free
Free pseudo-code
if NOT) p is before p + chunksize(p) (i.e. the pointer doesn't wrap around)
    error ( "free(): invalid pointer" )
if NOT) chunk size >= MINSIZE AND a multiple of MALLOC_ALIGNMENT
    error ( "free(): invalid size" )
if ) chunk's size == fastbin range
    if NOT) minimum ( 2 * SIZE_SZ ) <= next chunk's size <= maximum ( av->system_mem )
        error ( "free(): invalid next size (fast)" )
    if ) the chunk at the top of the fastbin == the chunk we are going to add
        error ( "double free or corruption (fasttop)" )
    if NOT) the size of the chunk at the top == the size of the chunk we are adding
        error ( "invalid fastbin entry (free)" )
    insert the chunk at the top of the fastbin list and return
if ) chunk is not mmapped
    if ) the chunk == top chunk
        error ( "double free or corruption (top)" )
    if NOT) the next chunk is within the boundaries of the arena
        error ( "double free or corruption (out)" )
    if NOT) the next chunk's PREV_INUSE bit is set
        error ( "double free or corruption (!prev)" )
    if NOT) minimum ( 2 * SIZE_SZ ) <= next chunk's size <= maximum ( av->system_mem )
        error ( "free(): invalid next size (normal)" )
    if ) the previous chunk is not in use
        unlink( previous chunk )
    if ) next chunk != top chunk
        if ) the next chunk is not in use
            unlink( next chunk )
        merge the chunk with the previous or next chunk, if free
        if NOT) unsorted_chunks(av)->fd->bk == unsorted_chunks(av)
            error ( "free(): corrupted unsorted chunks" )
        add it to the head of the unsorted bin
    else ) // next chunk is the top chunk
        merge the chunk into a single top chunk
else ) call munmap_chunk
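The simplest way to hit one of these checks is a straight double free of a fastbin chunk (glibc 2.23 assumed; on this version there is no tcache in front of the fastbins):

#include <stdlib.h>

int main(void)
{
    void *p = malloc(0x20);
    free(p);       /* p becomes the fastbin head */
    free(p);       /* head == p -> "double free or corruption (fasttop)" */
    return 0;
}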
Exception codes in Free
SIZE_SZ = sizeof(size_t)
    if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
        || __builtin_expect (chunksize (chunk_at_offset (p, size))
                             >= av->system_mem, 0))
      {
        /* We might not have a lock at this point and concurrent modifications
           of system_mem might have let to a false positive.  Redo the test
           after getting the lock.  */
        if (have_lock
            || ({ assert (locked == 0);
                  mutex_lock(&av->mutex);
                  locked = 1;
                  chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
                    || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
              }))
          {
            errstr = "free(): invalid next size (fast)";
            goto errout;
          }
        if (! have_lock)
          {
            (void)mutex_unlock(&av->mutex);
            locked = 0;
          }
      }

    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);

    set_fastchunks(av);
    unsigned int idx = fastbin_index(size);
    fb = &fastbin (av, idx);

    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
    mchunkptr old = *fb, old2;
    unsigned int old_idx = ~0u;
    do
      {
        /* Check that the top of the bin is not the record we are going to add
           (i.e., double free).  */
        if (__builtin_expect (old == p, 0))
          {
            errstr = "double free or corruption (fasttop)";
            goto errout;
          }
        /* Check that size of fastbin chunk at the top is the same as
           size of the chunk that we are adding.  We can dereference OLD
           only if we have the lock, otherwise it might have already been
           deallocated.  See use of OLD_IDX below for the actual check.  */
        if (have_lock && old != NULL)
          old_idx = fastbin_index(chunksize(old));
        p->fd = old2 = old;
      }
    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);

    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
      {
        errstr = "invalid fastbin entry (free)";
        goto errout;
      }
  }

  /*
    Consolidate other non-mmapped chunks as they arrive.
  */

  else if (!chunk_is_mmapped(p)) {
    if (! have_lock) {
      (void)mutex_lock(&av->mutex);
      locked = 1;
    }

    nextchunk = chunk_at_offset(p, size);

    /* Lightweight tests: check whether the block is already the
       top block.  */
    if (__glibc_unlikely (p == av->top))
      {
        errstr = "double free or corruption (top)";
        goto errout;
      }
    /* Or whether the next chunk is beyond the boundaries of the arena.  */
    if (__builtin_expect (contiguous (av)
                          && (char *) nextchunk
                          >= ((char *) av->top + chunksize(av->top)), 0))
      {
        errstr = "double free or corruption (out)";
        goto errout;
      }
    /* Or whether the block is actually not marked used.  */
    if (__glibc_unlikely (!prev_inuse(nextchunk)))
      {
        errstr = "double free or corruption (!prev)";
        goto errout;
      }

    nextsize = chunksize(nextchunk);
    if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
        || __builtin_expect (nextsize >= av->system_mem, 0))
      {
        errstr = "free(): invalid next size (normal)";
        goto errout;
      }

    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);

    /* consolidate backward */
    if (!prev_inuse(p)) {
      prevsize = p->prev_size;
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      unlink(av, p, bck, fwd);
    }

    if (nextchunk != av->top) {
      /* get and clear inuse bit */
      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

      /* consolidate forward */
      if (!nextinuse) {
        unlink(av, nextchunk, bck, fwd);
        size += nextsize;
      } else
        clear_inuse_bit_at_offset(nextchunk, 0);

      /*
        Place the chunk in unsorted chunk list. Chunks are
        not placed into regular bins until after they have
        been given one chance to be used in malloc.
      */

      bck = unsorted_chunks(av);
      fwd = bck->fd;
      if (__glibc_unlikely (fwd->bk != bck))
        {
          errstr = "free(): corrupted unsorted chunks";
          goto errout;
        }
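Note that the fasttop check above only compares p against the current head of the bin. Freeing another chunk in between is the classic bypass, the 'fastbin dup' from shellphish's how2heap. A sketch, assuming glibc 2.23 on 64-bit with no tcache:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *a = malloc(0x20);
    void *b = malloc(0x20);

    free(a);        /* fastbin: a */
    free(b);        /* fastbin: b -> a */
    free(a);        /* head is b, not a, so the check passes: a -> b -> a */

    /* the corrupted list now hands out the same chunk twice */
    printf("%p\n", malloc(0x20));   /* a */
    printf("%p\n", malloc(0x20));   /* b */
    printf("%p\n", malloc(0x20));   /* a again */
    return 0;
}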