Revamp 32-bit stub/src/*-linux.elf-so_main.c

... including better error checking of system calls
	modified:   stub/src/i386-linux.elf-entry.S
	modified:   stub/src/i386-linux.elf-fold.S
	modified:   stub/src/i386-linux.elf-so_entry.S
	modified:   stub/src/i386-linux.elf-so_fold.S
	modified:   stub/src/i386-linux.elf-so_main.c
John Reiser 2024-11-18 16:12:44 -08:00
parent 7ef018fed0
commit 58d9b00a0a
5 changed files with 194 additions and 164 deletions

stub/src/i386-linux.elf-entry.S

@@ -311,7 +311,7 @@ eof_n2b:
push $0 // arg6
push mfd // arg5
push $MAP_FIXED|MAP_PRIVATE // arg4
push $PROT_READ|PROT_EXEC // arg3
push $PROT_READ|PROT_EXEC // arg3 PROT_WRITE: DEBUG ONLY
push F_LENU(%ebp) // arg2
push F_ADRU(%ebp) // arg1
call mmap; add $6*NBPW,%esp
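
Aside: these six pushes build the argument block for the i386 old mmap syscall (__NR_oldmmap == 90), which takes a single pointer in %ebx to six words. The kernel declares that block as struct mmap_arg_struct; a sketch of the layout, arg1 at the lowest address:

    struct mmap_arg_struct {     // Linux i386 sys_old_mmap argument block
        unsigned long addr;      // arg1: F_ADRU
        unsigned long len;       // arg2: F_LENU
        unsigned long prot;      // arg3: PROT_READ|PROT_EXEC
        unsigned long flags;     // arg4: MAP_FIXED|MAP_PRIVATE
        unsigned long fd;        // arg5: mfd
        unsigned long offset;    // arg6: 0
    };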
@@ -409,7 +409,9 @@ my_bkpt: .globl my_bkpt
int3 // my_bkpt
ret
// IDENTSTR goes here
.balign 4
upx_mmap_and_fd:
// section UMF_LINUX or UMF_ANDROID goes here
section ELFMAINZ
L70:

stub/src/i386-linux.elf-fold.S

@@ -304,11 +304,17 @@ Pprotect: .globl Pprotect
// FIXME: page-shift the file offset (last parameter) ??
// C-callable, so do NOT remove arguments as part of return
mmap: .globl mmap // oldmmap: ebx -> 6 arguments
push ebx // save register
lea ebx,[2*NBPW + esp]
mov al,__NR_oldmmap; call sys_check_al
pop ebx // restore register
ret
push ebx // save register
lea ebx,[2*NBPW + esp]
mov al,__NR_oldmmap; call sys_check_al
mov ecx,[0*NBPW + ebx] // requested addr
test ecx,ecx; je 0f // kernel chose
testb [3*NBPW + ebx],MAP_FIXED; je 0f
cmp ecx,eax; je 0f // addr was preserved
hlt
0:
pop ebx // restore register
ret
sys_check_al:
movzbl eax,al
@@ -320,21 +326,17 @@ sys_check:
hlt
stat: .globl stat
push %ebp
mov %ebp,%esp
push %ebx
push %ebp; mov %ebp,%esp; push %ebx
mov %ebx,[2*NBPW + %ebp]
mov %ecx,[3*NBPW + %ebp]
push __NR_stat; pop %eax; int 0x80
mov al,__NR_stat; call sys_check_al
pop %ebx; pop %ebp
ret
uname: .globl uname
push %ebp
mov %ebp,%esp
push %ebx
push %ebp; mov %ebp,%esp; push %ebx
mov %ebx, [2*NBPW + %ebp]
push __NR_uname; pop %eax; int 0x80
mov al,__NR_uname; call sys_check_al
pop %ebx; pop %ebp
ret
@@ -344,9 +346,8 @@ mkdir: .globl mkdir
push %ebx
mov %ebx,[2*NBPW + %ebp]
mov %ecx,[3*NBPW + %ebp]
push __NR_mkdir; pop %eax; int 0x80
pop %ebx
pop %ebp
mov al,__NR_mkdir; call sys_check_al
pop %ebx; pop %ebp
ret
memset: .globl memset // (dst, val, n)

stub/src/i386-linux.elf-so_entry.S

@@ -48,15 +48,17 @@ MAP_PRIVATE= 2
MAP_FIXED= 0x10
MAP_ANONYMOUS= 0x20
__NR_memfd_create= 0x164 // 356
__NR_mprotect=125
__NR_munmap= 91
__NR_oldmmap= 90 // old mmap: %ebx -> args[6]
__NR_read= 3
__NR_close= 6
__NR_exit= 1
__NR_write= 4
__NR_memfd_create= 0x164 // 356
__NR_mkdir= 39
__NR_mprotect=125
__NR_munmap= 91
__NR_oldmmap= 90 // old mmap: %ebx -> args[6]
__NR_read= 3
__NR_stat= 106
__NR_uname= 122
__NR_write= 4
PAGE_SHIFT= 12
PAGE_MASK= (~0<<PAGE_SHIFT)
@@ -137,8 +139,8 @@ LEN_PATH= 1+ 11 + NAME_MAX + 13 // "/data/data/$APP_NAME/cache/upxAAA"
push %edi // arg3 pathname; pathname[0] = '\0'
push %ecx // arg2 F_LENU
push $0 // arg1 any page address
call upx_mmap_and_fd // %eax= page_addr | (1+fd)
add $3*NBPW,%esp
call upx_mmap_and_fd; add $3*NBPW,%esp // %eax= page_addr | (1+fd)
test $(1<<11),%eax; jz 0f; hlt; 0: // fd "negative" ==> failure
#define mfd %edx
mov %eax,mfd
shrl $12,%eax
@@ -282,6 +284,10 @@ old_mmap: // oldmmap: ebx -> 6 arguments; remove arguments on return
cmp $PAGE_MASK,%eax; jb 0f; hlt; 0:
ret $6*4
.balign 4
upx_mmap_and_fd:
// section UMF_LINUX or UMF_ANDROID goes here
// IDENTSTR goes here
section ELFMAINZ
@@ -289,6 +295,28 @@ get_upxfn_path: .globl get_upxfn_path // char * (*)(void)
xor %eax,%eax // persistence not desired
ret
stat: .globl stat
xchg %ebx,NBPW(%esp)
mov 2*NBPW(%esp),%ecx
movb $__NR_stat,%al; call sys_check_al
mov NBPW(%esp),%ebx
ret
uname: .globl uname
push %ebp; mov %esp,%ebp; push %ebx
mov 2*NBPW(%ebp),%ebx
movb $__NR_uname,%al; call sys_check_al
pop %ebx; pop %ebp
ret
mkdir: .globl mkdir
push %ebp; mov %esp,%ebp; push %ebx
mov 2*NBPW(%ebp),%ebx
mov 3*NBPW(%ebp),%ecx
movb $__NR_mkdir,%al; call sys_check_al
pop %ebx; pop %ebp
ret
memset: .globl memset // (dst, val, n)
push %ebp; mov %esp,%ebp
push %edi
@@ -320,7 +348,7 @@ my_bkpt:
mmap: .globl mmap // oldmmap: ebx -> 6 arguments
push %ebx // save register
lea 2*NBPW(%esp),%ebx
mov $__NR_oldmmap,%al; call sys_check_al
movb $__NR_oldmmap,%al; call sys_check_al
pop %ebx // restore register
ret

stub/src/i386-linux.elf-so_fold.S

@@ -2,6 +2,7 @@
#define section .section
NBPW= 4
MAP_FIXED= 0x10
#ifndef DEBUG //{
#define DEBUG 0
@@ -40,7 +41,6 @@ get_upxfn_path: .globl get_upxfn_path // char * (*)(void)
ret
L05: // %esp/ &so_info,PMASK,F_ADRU,F_LENU,8regs,ret_addr,argc
int3
pop %ecx // &so_info
lea (3+8+1)*NBPW(%esp),%eax // &{argc,argv,envp}
sub $MAX_ELF_HDR_32,%esp; push %esp // &elf_tmp
@@ -153,11 +153,16 @@ mmap: .globl mmap // oldmmap: %ebx -> 6 word parameters
push %ebx // save C-lang register
lea 2*NBPW(%esp),%ebx
mov (%ebx),%eax // arg1
and $0xfff,%eax // lo fragment
and $0xfff,%eax // lo fragment PAGE_SIZE
sub %eax, (%ebx) // page align lo end
add %eax,NBPW(%ebx)
push $ __NR_mmap; pop %eax
int $0x80; cmp $-0x1000,%eax; jna 0f; hlt; 0:
movb $ __NR_mmap,%al; call sys_check_al
mov 0*NBPW(%ebx),%ecx // requested addr
test %ecx,%ecx; je 0f // kernel chose
testb $MAP_FIXED,3*NBPW(%ebx); je 0f
cmp %ecx,%eax; je 0f // addr was preserved
hlt
0:
pop %ebx // restore
ret
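
The instructions added after sys_check_al enforce the MAP_FIXED contract: when a non-zero address is requested with MAP_FIXED, the kernel must return exactly that address. A C sketch of the same check (the asm's hlt becomes a trap here):

    void *rv = mmap(addr, len, prot, flags, fd, off);
    if (addr != 0 && (flags & MAP_FIXED) && rv != addr)
        __builtin_trap();   // a relocated MAP_FIXED mapping is fatal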
@@ -187,12 +192,11 @@ mmap: .globl mmap // oldmmap: %ebx -> 6 word parameters
Pprotect: .globl Pprotect // from C
xchg %ebx,1*NBPW(%esp) // save reg, %ebx= address
mov %ebx,%ecx // copy address
and $~0<<12,%ebx // page align
and $~0<<12,%ebx // page align PAGE_MASK
sub %ebx,%ecx // extra length
add 2*NBPW(%esp),%ecx // length
mov 3*NBPW(%esp),%edx // bits
push $__NR_mprotect; pop %eax; int $0x80
cmp $-0x1000,%eax; jna 0f; hlt; 0:
movb $__NR_mprotect,%al; call sys_check_al
mov 1*NBPW(%esp),%ebx // restore reg
ret
@@ -200,34 +204,38 @@ Punmap: .globl Punmap // from C
push %ebp; mov %esp,%ebp
push %ebx
mov (0+2)*NBPW(%ebp),%ebx // addr
mov %ebx,%eax; and $-1+ (1<<12),%eax
mov %ebx,%eax; and $-1+ (1<<12),%eax // PAGE_MASK
sub %eax,%ebx
mov (1+2)*NBPW(%ebp),%ecx // len
add %eax,%ecx
push $__NR_munmap; pop %eax; int $0x80
cmp $-0x1000,%eax; jna 0f; hlt; 0:
movb $__NR_munmap,%al; call sys_check_al
pop %ebx; pop %ebp
ret
memfd_create: .globl memfd_create
push $__NR_memfd_create; 5: jmp 5f
mprotect: .globl mprotect
mov %ebx,%eax; and $-1+ (1<<12),%eax
mov $__NR_memfd_create,%eax; jmp sys_check
mprotect: .globl mprotect // also Pprotect
mov %ebx,%eax; and $-1+ (1<<12),%eax // PAGE_MASK
sub %eax,%ebx
add %eax,%ecx
push $ __NR_mprotect; 5: jmp 5f
movb $ __NR_mprotect,%al; 5: jmp 5f
exit: .globl exit
push $ __NR_exit; 5: jmp 5f
movb $ __NR_exit,%al; 5: jmp 5f
close: .globl close
push $__NR_close; 5: jmp 5f
movb $__NR_close,%al; 5: jmp 5f
munmap: .globl munmap
push $ __NR_munmap; 5: jmp 5f
movb $ __NR_munmap,%al; 5: jmp 5f
Pwrite: .globl Pwrite
int3
write: .globl write
push $__NR_write; 5:
pop %eax
movb $__NR_write,%al; 5:
sys_check_al:
movzbl %al,%eax
sys_check:
push %eax // save __NR_ for debug
int $0x80
ret
pop %edx // recover __NR_ for debug
cmp $-1<<12,%eax; jae 0f; ret; 0:
hlt
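
sys_check_al/sys_check encode the Linux syscall convention: failure is reported by returning a value in the top page of the address space, where -4095..-1 carries -errno (the stub tests against -4096, i.e. -1<<12). A C equivalent of the check, as a sketch:

    static long sys_check(long rv)
    {
        if ((unsigned long)rv >= (unsigned long)-4096)  // -errno range
            __builtin_trap();                           // asm executes hlt
        return rv;
    }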
// section SO_MAIN inserted here

stub/src/i386-linux.elf-so_main.c

@@ -34,7 +34,7 @@
extern void my_bkpt(void const *arg1, ...);
#define DEBUG 1
#define DEBUG 0
// Pprotect is mprotect, but page-aligned on the lo end (Linux requirement)
unsigned Pprotect(void *, size_t, unsigned);
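
In C terms the declared contract is roughly the following sketch, assuming it mirrors the Pprotect assembly in so_fold.S:

    unsigned Pprotect(void *addr, size_t len, unsigned prot)
    {
        size_t frag = 0xfff & (size_t)addr;   // lo fragment within the page
        return mprotect((char *)addr - frag, len + frag, prot);
    }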
@@ -164,8 +164,6 @@ typedef struct {
static void
xread(Extent *x, char *buf, size_t count)
{
DPRINTF("xread x.size=%%x x.buf=%%p buf=%%p count=%%x\\n",
x->size, x->buf, buf, count);
char *p=x->buf, *q=buf;
size_t j;
if (x->size < count) {
@@ -176,8 +174,6 @@ xread(Extent *x, char *buf, size_t count)
}
x->buf += count;
x->size -= count;
DPRINTF("yread x.size=%%x x.buf=%%p buf=%%p count=%%x\\n",
x->size, x->buf, buf, count);
}
@@ -185,7 +181,7 @@ xread(Extent *x, char *buf, size_t count)
// UPX & NRV stuff
**************************************************************************/
int f_expand( // .globl in $(ARCH)-linux.elf-so_fold.S
extern int f_expand( // .globl in $(ARCH)-linux.elf-so_fold.S
nrv_byte const *binfo, nrv_byte *dst, size_t *dstlen);
static void
@@ -195,8 +191,8 @@ unpackExtent(
)
{
while (xo->size) {
DPRINTF("unpackExtent xi=(%%p %%p) xo=(%%p %%p) f_expand=%%p\\n",
xi->size, xi->buf, xo->size, xo->buf, f_expand);
DPRINTF("unpackExtent xi=(%%p %%p) xo=(%%p %%p)\\n",
xi->size, xi->buf, xo->size, xo->buf);
struct b_info h;
// Note: if h.sz_unc == h.sz_cpr then the block was not
// compressible and is stored in its uncompressed form.
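
A hypothetical simplification of how unpackExtent acts on that note (stored blocks are copied through, the rest are expanded):

    if (h.sz_unc == h.sz_cpr) {        // stored: block was not compressible
        xread(xi, xo->buf, h.sz_unc);  // plain copy
    } else {
        size_t dstlen = h.sz_unc;
        f_expand((nrv_byte const *)xi->buf, (nrv_byte *)xo->buf, &dstlen);
    }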
@@ -265,7 +261,7 @@ ERR_LAB
error;
#endif //}
extern unsigned long upx_mmap_and_fd( // x86_64 Android emulator of i386 is not faithful
extern char *upx_mmap_and_fd( // x86_64 Android emulator of i386 is not faithful
void *ptr // desired address
, unsigned len // also pre-allocate space in file
, char *pathname // 0 ==> call get_upxfn_path, which stores if 1st time
@@ -330,11 +326,11 @@ make_hatch(
hatch[1] = code[1];
}
else { // Does not fit at hi end of .text, so must use a new page "permanently"
unsigned long fdmap = upx_mmap_and_fd((void *)0, sizeof(code), 0);
unsigned mfd = -1+ (0xfff& fdmap);
char *fdmap = upx_mmap_and_fd((void *)0, sizeof(code), 0);
unsigned mfd = -1+ (0xfff& (unsigned)fdmap);
write(mfd, &code, sizeof(code));
hatch = mmap((void *)(fdmap & ~0xffful), sizeof(code),
PROT_READ|PROT_EXEC, MAP_PRIVATE, mfd, 0);
hatch = mmap((void *)((unsigned long)fdmap & ~0xffful), sizeof(code),
PROT_READ|PROT_EXEC, MAP_PRIVATE, mfd, 0);
close(mfd);
}
}
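
The return value of upx_mmap_and_fd packs two results into one word; spelled out as a sketch of the decode used above:

    char *fdmap = upx_mmap_and_fd((void *)0, sizeof(code), 0);
    unsigned mfd = -1 + (0xfff & (unsigned)fdmap);             // low 12 bits hold 1+fd
    void *page = (void *)((unsigned long)fdmap & ~0xffful);    // upper bits: page address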
@@ -436,18 +432,25 @@ extern void *memset(void *dst, unsigned val, size_t n);
#ifndef __arm__ //{
// Segregate large local array, to avoid code bloat due to large displacements.
static void
underlay(unsigned size, char *ptr, unsigned len, unsigned p_flags) // len < PAGE_SIZE
underlay(unsigned size, char *ptr, unsigned page_mask)
{
(void)p_flags; // for Linux ARM only
unsigned saved[-PAGE_MASK/sizeof(unsigned)];
memcpy(saved, ptr, len);
mmap(ptr, size, PROT_WRITE|PROT_READ,
MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
memcpy(ptr, saved, len);
//my_bkpt((void const *)0x1231, size, ptr, page_mask);
unsigned len = ~page_mask & (unsigned)ptr;
if (len) {
unsigned saved[(1<<16)/sizeof(unsigned)]; // maximum PAGE_SIZE
memcpy(saved, (void *)(page_mask & (long)ptr), len);
mmap(ptr, size, PROT_WRITE|PROT_READ,
MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
memcpy(ptr, saved, len);
}
else {
mmap(ptr, size, PROT_WRITE|PROT_READ,
MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
}
}
#else //}{
extern void
underlay(unsigned size, char *ptr, unsigned len, unsigned p_flags);
underlay(unsigned size, char *ptr, unsigned page_mask);
#endif //}
extern int ftruncate(int fd, size_t length);
@@ -460,6 +463,41 @@ unsigned PF_to_PROT(Elf32_Phdr const *phdr)
[phdr->p_flags & (PF_R|PF_W|PF_X)];
}
unsigned
fini_SELinux(
unsigned size,
char *ptr,
Elf32_Phdr const *phdr,
unsigned mfd,
Elf32_Addr base
)
{
if (phdr->p_flags & PF_X) {
// Map the contents of mfd as per *phdr.
Punmap(ptr, size);
Pmap(ptr, size, PF_to_PROT(phdr), MAP_FIXED|MAP_PRIVATE, mfd, 0);
close(mfd);
}
else { // easy
Pprotect( (char *)(phdr->p_vaddr + base), phdr->p_memsz, PF_to_PROT(phdr));
}
return 0;
}
unsigned
prep_SELinux(unsigned size, char *ptr, unsigned len) // returns mfd
{
// Cannot set PROT_EXEC except via mmap() into a region (Linux "vma")
// that has never had PROT_WRITE. So use a Linux-only "memory file"
// to hold the contents.
char *val = upx_mmap_and_fd(ptr, size, nullptr);
unsigned mfd = 0xfff & (unsigned)val;
val -= mfd; --mfd;
if (len)
Pwrite(mfd, ptr, len); // Save lo fragment of contents on first page.
return mfd;
}
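
The comment above is the heart of the prep_SELinux/fini_SELinux pair: under common SELinux policies a vma that has ever been PROT_WRITE cannot later gain PROT_EXEC. The pattern in isolation, as a sketch using plain libc calls rather than the stub's wrappers:

    int fd = memfd_create("upx", 0);            // Linux-only in-memory file
    write(fd, code, code_len);                  // stage the bytes
    mmap(dest, code_len, PROT_READ|PROT_EXEC,   // this vma was never PROT_WRITE,
         MAP_FIXED|MAP_PRIVATE, fd, 0);         // so PROT_EXEC is allowed
    close(fd);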
typedef struct {
long argc;
char **argv;
@@ -535,112 +573,65 @@ upx_so_main( // returns &escape_hatch
// Process each read-only PT_LOAD.
// A read+write PT_LOAD might be relocated by rtld before de-compression,
// so it cannot be compressed.
struct b_info al_bi; // for aligned data from binfo
void *hatch = nullptr;
Elf32_Addr base = 0;
int n_load = 0;
for (; phdr < phdrN; ++phdr)
if ( phdr->p_type == PT_LOAD && !(phdr->p_flags & PF_W)) {
DPRINTF("phdr@%%p p_offset=%%p p_vaddr=%%p p_filesz=%%p p_memsz=%%p binfo=%%p\\n",
phdr, phdr->p_offset, phdr->p_vaddr, phdr->p_filesz, phdr->p_memsz, x0.buf);
if (phdr->p_type == PT_LOAD && !(phdr->p_flags & PF_W)) {
unsigned hi_offset = phdr->p_filesz + phdr->p_offset;
struct b_info al_bi; // for aligned data from binfo
// Need un-aligned read of b_info to determine compression sizes.
x0.size = sizeof(struct b_info);
xread(&x0, (char *)&al_bi, x0.size); // aligned binfo
x0.buf -= sizeof(al_bi); // back up (the xread() was a peek)
x1.size = hi_offset - xct_off;
x1.buf = (void *)(hi_offset + base - al_bi.sz_unc);
x0.size = al_bi.sz_cpr;
if (!base) {
base = (Elf32_Addr)va_load - phdr->p_vaddr;
DPRINTF("base=%%p\\n", base);
}
DPRINTF("phdr@%%p p_offset=%%p p_vaddr=%%p p_filesz=%%p p_memsz=%%p\\n",
phdr, phdr->p_offset, phdr->p_vaddr, phdr->p_filesz, phdr->p_memsz);
DPRINTF("x0=%%p x1=%%p\\n", &x0, &x1);
//my_bkpt((void const *)0x1230, phdr, &x0, &x1);
int mfd = 0;
char *mfd_addr = 0;
if ((phdr->p_filesz + phdr->p_offset) <= so_infc.off_xct_off) {
// Below compressed region, but might be the only PF_X segment
my_bkpt((void *)0x1244, phdr);
if (!hatch && phdr->p_flags & PF_X) {
my_bkpt((void *)0x1245, phdr);
char *haddr = base + (char *)(phdr->p_filesz + phdr->p_offset);
unsigned frag_mask = ~page_mask;
unsigned frag = frag_mask & (unsigned)haddr;
unsigned long val = upx_mmap_and_fd(haddr - frag, frag, nullptr);
mfd = 0xfff & val;
val -= mfd; if ((char *)val != (haddr - frag)) my_bkpt((void *)0x1243, val);
--mfd;
Pwrite(mfd, haddr - frag, frag); // original contents
mfd_addr = Pmap(haddr - frag, frag, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, mfd, 0);
DPRINTF("mfd_addr= %%p\\n", mfd_addr);
hatch = make_hatch(phdr, haddr, frag_mask);
Punmap(haddr - frag, -page_mask);
Pmap(haddr - frag, -page_mask, PROT_READ|PROT_EXEC, MAP_PRIVATE, mfd, 0);
close(mfd);
unsigned frag = ~page_mask & (unsigned)x1.buf;
unsigned mfd = 0;
if (!n_load) { // 1st PT_LOAD is special.
// Already ELF headers are in place, perhaps already followed
// by non-compressed loader tables below xct_off.
if (xct_off < hi_offset) { // 1st PT_LOAD also has compressed code, too
if (phdr->p_flags & PF_X) {
mfd = prep_SELinux(x1.size, x1.buf, frag);
}
else {
underlay(x1.size, x1.buf, page_mask); // also makes PROT_WRITE
}
unpackExtent(&x0, &x1);
if (!hatch && phdr->p_flags & PF_X) {
hatch = make_hatch(phdr, x1.buf, ~page_mask);
}
my_bkpt((void const *)0x1235, &x1);
fini_SELinux(x1.size, x1.buf, phdr, mfd, base); // FIXME: x1 changed!
}
}
Elf32_Addr const pfx = (so_infc.off_xct_off < phdr->p_offset)
? 0 // entire PT_LOAD is compressed
: so_infc.off_xct_off - phdr->p_offset ; // below xct_off is not
x0.size = sizeof(struct b_info);
xread(&x0, (char *)&al_bi, x0.size); // aligned binfo
x0.buf -= sizeof(al_bi); // back up (the xread() was a peek)
my_bkpt((void *)0x1248, &x0);
DPRINTF("next1 pfx=%%x binfo@%%p (%%p %%p %%p)\\n", pfx, x0.buf,
al_bi.sz_unc, al_bi.sz_cpr, *(unsigned *)(void *)&al_bi.b_method);
my_bkpt((void *)0x1246, phdr);
// Using .p_memsz implicitly handles .bss via MAP_ANONYMOUS.
// Omit any non-compressed prefix (below xct_off)
x1.buf = (char *)(pfx + phdr->p_vaddr + base);
x1.size = phdr->p_memsz;
if (phdr->p_memsz > pfx) {
x1.size -= pfx;
}
else if (!n_load) {
++n_load; // 0 --> 1
continue;
}
unsigned const frag = (phdr->p_vaddr + pfx) & ~page_mask; // lo fragment on page
x1.buf -= frag;
x1.size += frag;
DPRINTF("phdr(%%p %%p) xct_off=%%x frag=%%x\\n", x1.buf, x1.size, xct_off, frag);
if (phdr->p_flags & PF_X) { // SELinux
// Cannot set PROT_EXEC except via mmap() into a region (Linux "vma")
// that has never had PROT_WRITE. So use a Linux-only "memory file"
// to hold the contents.
unsigned long val = upx_mmap_and_fd(x1.buf, x1.size, nullptr);
mfd = 0xfff & val;
val -= mfd; if ((char *)val != x1.buf) my_bkpt((void *)0x1241, &x1);
--mfd;
Pwrite(mfd, x1.buf, frag); // Save lo fragment of contents on first page.
//Punmap(x1.buf, x1.size);
mfd_addr = Pmap(x1.buf, x1.size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, mfd, 0);
DPRINTF("mfd_addr= %%p\\n", mfd_addr); // Re-use the address space
}
else {
underlay(x1.size, x1.buf, frag, phdr->p_flags); // also makes PROT_WRITE
}
x1.buf += frag;
x1.size = al_bi.sz_unc;
x0.size = al_bi.sz_cpr + sizeof(struct b_info);
DPRINTF("before unpack x0=(%%p %%p x1=(%%p %%p)\\n", x0.size, x0.buf, x1.size, x1.buf);
unpackExtent(&x0, &x1); // updates x0 and x1
DPRINTF(" after unpack x0=(%%p %%p x1=(%%p %%p)\\n", x0.size, x0.buf, x1.size, x1.buf);
if (!hatch && phdr->p_flags & PF_X) {
hatch = make_hatch(phdr, x1.buf, ~page_mask);
}
if (phdr->p_flags & PF_X) { // SELinux
// Map the contents of mfd as per *phdr.
DPRINTF("mfd mmap addr=%%p len=%%p\\n", (phdr->p_vaddr + base + pfx), al_bi.sz_unc);
//Punmap(mfd_addr, frag + al_bi.sz_unc); // Discard RW mapping; mfd has the bytes
Pmap((char *)(phdr->p_vaddr + base + pfx), al_bi.sz_unc, PF_to_PROT(phdr),
MAP_FIXED|MAP_PRIVATE, mfd, 0);
close(mfd);
}
else { // easy
Pprotect( (char *)(phdr->p_vaddr + base), phdr->p_memsz, PF_to_PROT(phdr));
else { // 2nd and later PT_LOADs
x1.buf = (void *)(phdr->p_vaddr + base);
x1.size = phdr->p_filesz;
if (phdr->p_flags & PF_X) {
mfd = prep_SELinux(x1.size, x1.buf, frag);
}
else {
underlay(x1.size, x1.buf, page_mask); // also makes PROT_WRITE
}
unpackExtent(&x0, &x1);
if (!hatch && phdr->p_flags &PF_X) {
hatch = make_hatch(phdr, x1.buf, ~page_mask);
}
fini_SELinux(al_bi.sz_unc, (void *)(phdr->p_vaddr + base), phdr, mfd, base);
}
++n_load;
}
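
Both branches of the rewritten loop now share one shape; as an outline of the code above (not new logic):

    unsigned mfd = 0;
    if (phdr->p_flags & PF_X)
        mfd = prep_SELinux(x1.size, x1.buf, frag);  // stage bytes in a memfd
    else
        underlay(x1.size, x1.buf, page_mask);       // anonymous PROT_WRITE overlay
    unpackExtent(&x0, &x1);                         // decompress in place
    if (!hatch && (phdr->p_flags & PF_X))
        hatch = make_hatch(phdr, x1.buf, ~page_mask);
    fini_SELinux(al_bi.sz_unc, (void *)(phdr->p_vaddr + base), phdr, mfd, base);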