#if 0
#
# http://fy.chalmers.se/~appro/linux/sysenter.c
#
# Copyright (c) 2001 by Andy <appro@fy.chalmers.se>
#
# This is a Linux kernel module implementing support for the Intel
# SYSENTER "Fast System Call" facility (see the "IA-32 System
# Programming Guide" for further details). It's meant for research
# purposes and is subject to the usual "no-warranty-don't-complain"
# blah-blah...
#
# As I figured out after the module was almost ready, the matter had
# already been discussed on the kernel development list in late
# December 1999 and the idea was basically condemned... Well, when I
# started I wanted to learn kernel internals and I certainly did, so
# the project "goal" was achieved. And then I felt like sharing it:-)
#
# Note that it's not a patch, but a loadable module! Well, it does some
# *run-time* patching to the 2.2.x kernel by replacing the first
# instructions of the context switching procedure with a jump
# instruction transferring control to an equivalent procedure of my own
# design, but there is no source code patching involved...
#
# If executed, this file (yes, this is self-compiling C code:-) will
# compile and install the module, compile a shared object (to be
# preloaded with LD_PRELOAD) overriding a couple of system calls, and
# benchmark a syscall-hungry application, namely
# 'dd if=/dev/zero of=/dev/null':-) On a 550MHz PIII the program
# exhibits a 20% performance improvement when tricked into taking the
# SYSENTER path.
#
# Prerequisites: up-to-date kernel headers in /usr/src/linux as well as
# a System.map (alternatively /boot/System.map) matching the current
# /proc/ksyms. See the embedded script for details. I use UTS_RELEASE
# and the address of the sock_register() function for fingerprinting,
# in order to prevent a module compiled for another kernel from being
# loaded.
#
# Questions and answers.
#
# Q. Is the module SMP-safe?
# A. Almost. When being compiled for 2.2.x the code can be compiled to
#    be "cruel" (run-time patching, see above, default behavior) or
#    "gentle." The "cruel" version is SMP-safe, the "non-cruel" one is
#    not (because it uses a global variable). Under 2.4.x the generated
#    code is always SMP-safe (unlike 2.2.x, 2.4.x doesn't flip the Task
#    Register at context switch, which makes the pointer to the TSS a
#    perfect candidate for SYSENTER_ESP_MSR:-).
#
# Q. How come the handler doesn't manage so-called "bottom halves" or
#    "soft IRQs"?
# A. There is no need for this. Soft IRQs can only appear at exit from
#    hardware interrupt handlers. Indeed, we can't count on a user app
#    being around and performing a system call when it comes to
#    interrupt handling, right?
#
# Q. Where do you set up the pointer to the current task structure?
# A. Normally it's done with "%esp & -8192", right? Here it's done with
#    "leal -8152(%esp),%ebp" (i.e. with a carefully chosen offset:-).
#    It's possible because a user-originated system call is always
#    executed from the very bottom of the kernel stack. And no, the
#    kernel itself can't use SYSENTER, as SYSEXIT unconditionally drops
#    CPL to 3.
#
# Q. Why aren't there any fix-ups?
# A. Well, "1: movl -4(%%ebp),%%edi # fetch the return address" and
#    "2: movl (%%ebp),%%ebp # fetch 6-th argument" should probably be
#    guarded by fix-up code. On the other hand, the arguments are
#    expected to be passed through a "flat" (stack) segment. As we
#    can't possibly know whether the user stack segment was "flat"
#    (and we basically shouldn't care, as SYSEXIT unconditionally
#    writes 0x2B to %ss), there isn't much reasonable we can do but
#    let it fail with a segfault. Well, I could call do_exit() in
#    order to kill the application with a little more discretion...
#    Give me a reason to...
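#
# (An aside on the "leal -8152(%esp),%ebp" answer above; my annotation,
# not part of the original text. The canonical 2.2.x sequence,
#
#	movl	%esp,%ebp
#	andl	$-8192,%ebp		# current = %esp & ~8191
#
# works at any stack depth because "current" sits at the base of the
# 8KB region containing the kernel stack. The handler below instead
# counts bytes: by the time its "leal" executes it has pushed exactly
# 40 bytes (eight pushl's plus "subl $8,%esp") onto an otherwise empty
# kernel stack, so %esp == current + 8192 - 40 == current + 8152, and a
# single "leal" recovers the very same pointer without clobbering a
# scratch register.)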
#
# Q. What about VM86?
# A. A VM86 task should never call SYSENTER. If it does, it should
#    simply be terminated. Right now I do nothing about it, counting on
#    the user code screwing itself (see also the previous question) at
#    SYSEXIT, which unconditionally overwrites %cs and %ss with
#    predefined values.
#
# Q. What about AMD's SYSCALL/SYSRET?
# A. Adaptation is trivial. It's not done only because I don't have
#    access to an appropriate AMD box to play with. Keep in mind that
#    AMD didn't bother to provide means for setting up %esp, so that
#    the "non-cruel" code is the only option. Then it should also be
#    noted that later AMD processors implement Intel's
#    SYSENTER/SYSEXIT.
#
# Cheers! Andy.
#
/bin/sh << EOS
MODNAME=\`basename "$0" .c\`
LINUX_SRC=/usr/src/linux
if [ -f \$LINUX_SRC/System.map ]; then
	SYSTEM_MAP=\$LINUX_SRC/System.map
else
	SYSTEM_MAP=/boot/System.map
fi
if [ ! -r \$SYSTEM_MAP ]; then
	echo "Missing \$SYSTEM_MAP. Aborting!"; exit
fi
SOCK_REGISTER=\`awk /sock_register/'{printf"%s",\$1}' /proc/ksyms\`
SOCK_REGISTER=\`awk /^\$SOCK_REGISTER/'{printf"%s",\$1}' \$SYSTEM_MAP\`
if [ \"_\$SOCK_REGISTER\" = \"_\" ]; then
	echo "Symbol mismatch between /proc/ksyms and \$SYSTEM_MAP. Aborting!"; exit
fi
__SWITCH_TO=\`awk '/__switch_to/{printf"%s",\$1}' \$SYSTEM_MAP\`
if [ \"_\$__SWITCH_TO\" = \"_\" ]; then
	echo "Unable to locate \"__switch_to\" in \$SYSTEM_MAP. Aborting!"; exit
fi
GDT_TABLE=\`awk '/gdt_table/{printf"%s",\$1}' \$SYSTEM_MAP\`
if [ \"_\$GDT_TABLE\" = \"_\" ]; then
	echo "Unable to locate \"gdt_table\" in \$SYSTEM_MAP. Aborting!"; exit
fi
DO_SIGNAL=\`awk '/do_signal/{printf"%s",\$1}' \$SYSTEM_MAP\`
if [ \"_\$DO_SIGNAL\" = \"_\" ]; then
	echo "Unable to locate \"do_signal\" in \$SYSTEM_MAP. Aborting!"; exit
fi
SYSCALL_TRACE=\`awk '/syscall_trace/{printf"%s",\$1}' \$SYSTEM_MAP\`
if [ \"_\$SYSCALL_TRACE\" = \"_\" ]; then
	echo "Unable to locate \"syscall_trace\" in \$SYSTEM_MAP. Aborting!"; exit
fi
if ( set -x; gcc -O -fbuiltin -fomit-frame-pointer \
	-D__KERNEL__ -DMODULE \
	-I\${LINUX_SRC}/include \
	-D__SWITCH_TO=0x\${__SWITCH_TO}U \
	-DGDT_TABLE=0x\${GDT_TABLE}U \
	-DSOCK_REGISTER=0x\${SOCK_REGISTER}U \
	-DDO_SIGNAL=0x\${DO_SIGNAL}U \
	-DSYSCALL_TRACE=0x\${SYSCALL_TRACE}U \
	-DREALLY_CRUEL \
	-c "$0" )
then
	echo
	( set -x; modprobe -r \$MODNAME )
	if ( set -x; insmod \$MODNAME.o )
	then
		( set -x; gcc -O -fPIC -fomit-frame-pointer \
			-I\${LINUX_SRC}/include \
			-shared -nostdlib \
			-o sysenter.so "$0"
		  time dd if=/dev/zero of=/dev/null count=2500000
		  env LD_PRELOAD=./sysenter.so \
		  time dd if=/dev/zero of=/dev/null count=2500000 )
	fi
fi
EOS
exit
#endif
#ifdef __KERNEL__

#ifndef __SWITCH_TO
#error "Missing __SWITCH_TO definition."
#endif
#ifndef GDT_TABLE
#error "Missing GDT_TABLE definition."
#endif
#ifndef SOCK_REGISTER
#error "Missing SOCK_REGISTER definition."
#endif
#ifndef DO_SIGNAL
#error "Missing DO_SIGNAL definition."
#endif
#ifndef SYSCALL_TRACE
#error "Missing SYSCALL_TRACE definition."
#endif
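/*
 * For reference (my annotation, numbers from the IA-32 manual): the
 * bare MSR numbers used throughout the module are
 *
 *	0x174	SYSENTER_CS_MSR		ring-0 code segment selector
 *	0x175	SYSENTER_ESP_MSR	ring-0 stack pointer for SYSENTER
 *	0x176	SYSENTER_EIP_MSR	handler entry point
 *
 * SYSENTER loads %cs/%ss/%esp/%eip from these; SYSEXIT derives the
 * ring-3 selectors from 0x174 as well (%cs = value+16, %ss = value+24,
 * RPL 3), which with __KERNEL_CS = 0x10 is exactly why it
 * "unconditionally writes 0x2B to %ss" as noted in the header above.
 */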
#ifndef MODULE
#define MODULE
#endif

/*
 * NOTE: the angle-bracketed header names did not survive in this copy
 * of the file; the includes below are a reconstruction covering the
 * symbols actually used.
 */
#include <linux/config.h>
#include <linux/module.h>

#ifndef KERNEL_VERSION
#define KERNEL_VERSION(a,b,c) (((a)<<16)|((b)<<8)|(c))
#endif

#undef I_KNOW
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
# define I_KNOW
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
# undef I_KNOW
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
# define I_KNOW
#endif
#ifndef I_KNOW
# error "I'm not familiar with your kernel:-("
#endif
#undef I_KNOW

#if defined(CONFIG_SMP) && !defined(__SMP__)
#define __SMP__
#endif

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,2,255)
#if defined(CONFIG_SMP) && !defined(REALLY_CRUEL)
#error "Has to be cruel with this kernel!"
#endif
#endif

#if CONFIG_MODVERSIONS==1
#ifndef MODVERSIONS
#define MODVERSIONS
#endif
#include <linux/modversions.h>
#endif

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#ifdef CONFIG_SMP
#include <linux/smp_lock.h>
#endif
#include <linux/smp.h>
#include <linux/sys.h>
#include <linux/utsname.h>
#include <linux/net.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/uaccess.h>

extern void *sys_call_table[];
extern void schedule();

static volatile int true=1;

#ifdef REALLY_CRUEL
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
#  warning "Don't have to be cruel with this kernel:-)"
#  undef REALLY_CRUEL
# elif LINUX_VERSION_CODE <= KERNEL_VERSION(2,2,255)

static unsigned int orig_machine_code [2];

static void my__switch_to(struct task_struct *, struct task_struct *)
			__attribute__((regparm(2)));

static void my__switch_to(struct task_struct *prev, struct task_struct *next)
{
	struct desc_struct * const gdt_table=(struct desc_struct *)GDT_TABLE;

	/*
	 * The whole thing with cmpxchg8b in the init_module() is
	 * exclusively for this "alien" code which is resetting
	 * SYSENTER_ESP_MSR at the context switch.
	 */
	asm volatile ("
		xorl	%%edx,%%edx
		movl	$0x175,%%ecx
		wrmsr"
	: : "a"(&(next->tss)) : "ecx","edx","cc" );

	/*
	 * The rest is a verbatim copy of __switch_to from
	 * linux-2.2.16/arch/i386/kernel/process.c.
	 */
#define loaddebug(tsk,register) \
		__asm__("movl %0,%%db" #register \
			: /* no output */ \
			:"r" (tsk->tss.debugreg[register]))

	/* Do the FPU save and set TS if it wasn't set before.. */
	unlazy_fpu(prev);

	/*
	 * Reload TR, LDT and the page table pointers..
	 *
	 * We need TR for the IO permission bitmask (and
	 * the vm86 bitmasks in case we ever use enhanced
	 * v86 mode properly).
	 *
	 * We may want to get rid of the TR register some
	 * day, and copy the bitmaps around by hand. Oh,
	 * well. In the meantime we have to clear the busy
	 * bit in the TSS entry, ugh.
	 */
	gdt_table[next->tss.tr >> 3].b &= 0xfffffdff;
	asm volatile ("ltr %0": :"g" (*(unsigned short *)&next->tss.tr));

	/*
	 * Save away %fs and %gs. No need to save %es and %ds, as
	 * those are always kernel segments while inside the kernel.
	 */
	asm volatile ("movl %%fs,%0":"=m" (*(int *)&prev->tss.fs));
	asm volatile ("movl %%gs,%0":"=m" (*(int *)&prev->tss.gs));

	/* Re-load LDT if necessary */
	if (next->mm->segments != prev->mm->segments)
		asm volatile ("lldt %0": :"g" (*(unsigned short *)&next->tss.ldt));

	/* Re-load page tables */
	{
		unsigned long new_cr3 = next->tss.cr3;
		if (new_cr3 != prev->tss.cr3)
			asm volatile ("movl %0,%%cr3": :"r" (new_cr3));
	}

	/*
	 * Restore %fs and %gs.
	 */
	loadsegment(fs,next->tss.fs);
	loadsegment(gs,next->tss.gs);

	/*
	 * Now maybe reload the debug registers
	 */
	if (next->tss.debugreg[7]){
		loaddebug(next,0);
		loaddebug(next,1);
		loaddebug(next,2);
		loaddebug(next,3);
		loaddebug(next,6);
		loaddebug(next,7);
	}
}
# endif
#else
# if LINUX_VERSION_CODE <= KERNEL_VERSION(2,2,255)
static unsigned int saved_edi asm (".Lsaved_edi");
# endif
#endif

/* Yields the field's offset as an absolute "memory operand" for asm. */
#define TS_FOFF(field)	(((struct task_struct *)NULL)->field)
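/*
 * A note on the calling convention (reconstructed from the code below
 * and the preloaded stubs at the end of this file, not from any spec):
 * the stubs execute SYSENTER with the syscall number in %eax, arguments
 * 1-5 in %ebx, %ecx, %edx, %esi, %edi, and %ebp holding the user stack
 * pointer such that (%ebp) is the 6-th argument and -4(%ebp) the return
 * address pushed by "call .L__sysenter". The handler fetches those two
 * words at labels "1:" and "2:" below, and at .Lsysexit pops the return
 * address into %edx and the user stack pointer into %ecx, which is
 * exactly where SYSEXIT expects them.
 */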
static void *sysenter_handler ()
{
	void *ret;

	asm volatile ("mov $.Lthe_handler,%0" : "=r"(ret));
	if (true)	/* don't let the rest be optimized away */
		return ret;

	asm volatile ("
	.align	8
.Lthe_handler:"
#if !defined(REALLY_CRUEL) && LINUX_VERSION_CODE <= KERNEL_VERSION(2,2,255)
"	movl	%%edi,%%ss:.Lsaved_edi
	str	%%di
	leal	%6(%%edi),%%edi		# locate TSS descriptor in GDT
	movl	%%ss:0(%%edi),%%esp
	movl	%%ss:4(%%edi),%%edi
	shrd	$8,%%edi,%%esp
	shrl	$24,%%edi
	nop
	shrd	$8,%%edi,%%esp		# %%esp points at TSS now
	.align	8
	movl	%%ss:.Lsaved_edi,%%edi
	.align	8
"
#endif
"	movl	4(%%esp),%%esp		# %%esp is expected to point at TSS!
	sti
	pushl	%%edi
	pushl	%%esi
	pushl	%%ebx
1:	movl	-4(%%ebp),%%edi		# fetch the return address
	pushl	%%ebp			# return stack pointer
	pushl	%%edi			# return address
2:	movl	(%%ebp),%%ebp		# fetch 6-th argument
	pushl	%%eax
	movl	20(%%esp),%%edi		# restore 5-th argument
	subl	$8,%%esp		# SAVE_ALL \"starts\" here
	pushl	%%eax
	leal	sys_call_table(,%%eax,4),%%eax
	pushl	%%ebp			# 6-th argument
	leal	-8152(%%esp),%%ebp
	pushl	%%edi			# 5-th argument
	pushl	%%esi			# 4-th argument
	movl	%%ds,%%edi
	testb	$0x20,%1(%%ebp)		# current->flags & PF_TRACESYS
	pushl	%%edx			# 3-rd argument
	movl	%%es,%%esi
	movl	%%ss,%%edx
	pushl	%%ecx			# 2-nd argument
	movl	%%edx,%%ds
	movl	20(%%esp),%%ecx		# pull syscall nr.
	pushl	%%ebx			# 1-st argument
	movl	%%edx,%%es
	nop
	jne	.Ltraced_sys
	cmpl	%0,%%ecx		# NR_syscalls
	jae	.Lbad_sys
	leal	0(%%esi),%%esi
	call	%%ss:*(%%eax)
	movl	%%eax,24(%%esp)
	cmpl	$0,%2(%%ebp)		# current->need_resched
	jne	.Lneed_reschedule
	movl	%%esi,%%esi
	cmpl	$0,%3(%%ebp)		# current->sigpending
	jne	.Lsignal_pending
.Lreturn:
	movl	24(%%esp),%%eax
	movl	%%edi,%%ds
	addl	$40,%%esp		# \"corresponds\" to RESTORE_ALL macro
	movl	%%esi,%%es
.Lsysexit:
	popl	%%edx			# return address
	popl	%%ecx			# return stack pointer
	popl	%%ebx
	popl	%%esi
	popl	%%edi
	sysexit
	hlt				# stop prefetching instructions
	.align	4
.Lneed_reschedule:
	call	schedule
	cmpl	$0,%3(%%ebp)		# signal pending
	je	.Lreturn
	.align	4
.Lsignal_pending:
	mov	%%esp,%%eax
	xorl	%%edx,%%edx
	call	%4			# do_signal();
	jmp	.Lreturn
	.align	4
.Lbad_sys:
	movl	$-38,24(%%esp)		# -ENOSYS
	jmp	.Lreturn
	.align	4
.Ltraced_sys:
	movl	$-38,24(%%esp)		# -ENOSYS
	movl	%%eax,%%ebx
	call	%5			# syscall_trace();
	cmpl	%0,36(%%esp)
	jae	1f
	call	%%ss:*(%%ebx)
	movl	%%eax,24(%%esp)
1:	call	%5			# syscall_trace();
	jmp	.Lreturn
	"
	: :	"i"(NR_syscalls),			/* %0 */
		"m"(TS_FOFF(flags)),			/* %1 */
		"m"(TS_FOFF(need_resched)),		/* %2 */
		"m"(TS_FOFF(sigpending)),		/* %3 */
		"m"(*((unsigned int *)DO_SIGNAL)),	/* %4 */
		"m"(*((unsigned int *)SYSCALL_TRACE)),	/* %5 */
		"m"(*((unsigned int *)GDT_TABLE))	/* %6 */
	:	"eax","ecx","edx","ebx","edi","esi","ebp","esp","cc","memory");
}

static void init_sysenter_up (void *info)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
	asm volatile ("
		str	%%ax
		leal	%0(%%eax),%%edx		# locate TSS descriptor in GDT
		movl	0(%%edx),%%eax
		movl	4(%%edx),%%edx
		shrd	$8,%%edx,%%eax
		shrl	$24,%%edx
		shrd	$8,%%edx,%%eax		# locate TSS itself in memory
		xorl	%%edx,%%edx
		movl	$0x175,%%ecx
		wrmsr"
	: : "m"(*((unsigned int *)GDT_TABLE)) : "eax","ecx","edx" );
#endif
	asm ("
		movl	%0,%%eax
		xorl	%%edx,%%edx
		movl	$0x174,%%ecx
		wrmsr
		movl	%1,%%eax
		xorl	%%edx,%%edx
		movl	$0x176,%%ecx
		wrmsr"
	: : "i"(__KERNEL_CS),"r"(sysenter_handler()) : "eax","ecx","edx" );
}
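/*
 * A decoding note (my annotation, not in the original): the 8-byte
 * value that cmpxchg8b deposits over the first instructions of
 * __switch_to in init_module() below is, in memory order,
 *
 *	8d 76 00		leal	0x0(%esi),%esi	# 3-byte no-op
 *	e9 xx xx xx xx		jmp	my__switch_to
 *
 * i.e. %ebx = 0xe900768d supplies the low dword and %ecx the jmp's
 * rel32 displacement, computed as my__switch_to - (__switch_to + 8)
 * because the jump is relative to the end of the 8-byte patch.
 * cmpxchg8b is used because it replaces all 8 bytes atomically; the
 * old bytes land in %edx:%eax and are saved for restoration at module
 * unload.
 */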
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
#define X86_CAP(cpu)	((cpu)->x86_capability[0])
#else
#define X86_CAP(cpu)	((cpu)->x86_capability)
#endif

/* cpuid is architecturally serializing; it flushes prefetched
 * instructions after the cross-modification of __switch_to. */
static inline void serializing_instruction (void *p)
{
	asm volatile ("xorl %%eax,%%eax; cpuid" : : : "eax","ebx","ecx","edx");
}

int init_module()
{
	struct cpuinfo_x86 *cpu=&boot_cpu_data;

	if (strcmp (system_utsname.release,UTS_RELEASE) ||
	    (unsigned int)sock_register != SOCK_REGISTER) {
		printk ("SYSENTER: Module was compiled for "UTS_RELEASE". Recompile!\n");
		return -EINVAL;
	}

	/* Early P6 steppings report SEP but implement it incorrectly. */
	if (((X86_CAP(cpu) & X86_FEATURE_SEP) == 0) ||
	    ((cpu->x86_vendor == X86_VENDOR_INTEL) &&
	     (cpu->x86 == 6) && (cpu->x86_model < 3) && (cpu->x86_mask < 3))) {
		printk ("SYSENTER: SYSENTER is not supported by this CPU.\n");
		return -EINVAL;
	}

#ifdef REALLY_CRUEL
	if ((X86_CAP(cpu) & X86_FEATURE_CX8) == 0) {
		printk ("SYSENTER: CMPXCHG8B is not supported by this CPU.\n");
		return -EINVAL;
	}

	{
	register unsigned int offset, sys__switch_to=__SWITCH_TO;

	offset = (unsigned int)my__switch_to - (sys__switch_to + 8);
	asm volatile ("
		movl	0(%2),%%eax
		movl	4(%2),%%edx
		lock cmpxchg8b (%2)"
	: "=a"(orig_machine_code[0]),"=d"(orig_machine_code[1])
	: "D"(sys__switch_to), "b"(0xe900768d),"c"(offset)
	: "cc" );
	}

#ifdef CONFIG_SMP
	/* smp_call_function() runs func on the *other* CPUs only... */
	if (smp_call_function (serializing_instruction,NULL,0,1) != 0)
		printk ("SYSENTER: timed out waiting for other CPUs.\n");
#endif
	serializing_instruction (NULL);	/* ...so take care of this one too */
#endif

#ifdef CONFIG_SMP
	if (smp_call_function (init_sysenter_up,NULL,0,1) != 0)
		printk ("SYSENTER: timed out waiting for other CPUs.\n");
#endif
	init_sysenter_up (NULL);	/* current CPU, see note above */

	printk ("SYSENTER: handler is at %x\n",sysenter_handler());

	return 0;
}

static void cleanup_sysenter_up (void *info)
{
	asm volatile ("
		xorl	%%eax,%%eax
		xorl	%%edx,%%edx
		movl	$0x174,%%ecx
		wrmsr
		movl	$0x175,%%ecx
		wrmsr
		movl	$0x176,%%ecx
		wrmsr"
	: : : "eax","ecx","edx" );
}

void cleanup_module()
{
#ifdef CONFIG_SMP
	if (smp_call_function (cleanup_sysenter_up,NULL,0,1) != 0)
		printk ("SYSENTER: timed out waiting for other CPUs.\n");
#endif
	cleanup_sysenter_up (NULL);

#ifdef REALLY_CRUEL
	{
	register unsigned int offset, sys__switch_to=__SWITCH_TO, eax, edx;

	/* Restore the original bytes; ZF=0 means somebody re-patched
	 * __switch_to behind our back. */
	offset = (unsigned int)my__switch_to - (sys__switch_to + 8);
	asm volatile ("lock cmpxchg8b (%2)"
	: "=a"(eax),"=d"(edx)
	: "D"(sys__switch_to),
	  "a"(0xe900768d),"d"(offset),
	  "b"(orig_machine_code[0]),"c"(orig_machine_code[1])
	: "cc" );
	asm volatile ("jnz .Lpanic");
#ifdef CONFIG_SMP
	if (smp_call_function (serializing_instruction,NULL,0,1) != 0)
		printk ("SYSENTER: timed out waiting for other CPUs.\n");
#endif
	serializing_instruction (NULL);
	if (true) return;
	asm volatile (".Lpanic:");	/* circumvent gcc bug */
	panic ("Somebody else has modified __switch_to [%04x:%04x]!\n",edx,eax);
	}
#endif
}

#else	/* !__KERNEL__: the LD_PRELOAD-able stubs */

/* Header names reconstructed (the originals were lost in this copy):
 * <sys/syscall.h> for the SYS_* numbers, <errno.h> with _LIBC defined
 * so that the internal __set_errno() macro becomes visible. */
#include <sys/syscall.h>
#define _LIBC
#define _LIBC_REENTRANT
#include <errno.h>

asm ("
	.text
	.align	8
.L__sysenter:
	sysenter
	hlt
	.align	8
");
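/*
 * Return convention of the stubs below (the same trick glibc's own
 * syscall wrappers use): kernel error returns occupy the small
 * negative range -4095..-1, i.e. (unsigned)ret > 0xfffff000, which
 * each macro converts into an errno assignment plus a -1 return;
 * anything else is passed through as the result.
 */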
/* Each macro takes the syscall number and the *first* of the caller's
 * stack-resident arguments; the remaining arguments are picked up from
 * the consecutive stack slots via "4+%2", "8+%2" and so on. */
#define sysenter_0(n) ({ \
	int ret; \
	asm volatile ("\
		pushl	%%ebp			\n\
		movl	%%esp,%%ebp		\n\
		call	.L__sysenter		\n\
		popl	%%ebp" \
	: "=a"(ret) \
	: "a"(n) \
	: "ecx","edx"); \
	((unsigned int)ret<=0xfffff000) ? ret : \
		(__set_errno(-ret), ret=-1); })

#define sysenter_1(n,a1) ({ \
	int ret; \
	asm volatile ("\
		movl	%%ebx,%%edi		\n\
		movl	%2,%%ebx		\n\
		pushl	%%ebp			\n\
		movl	%%esp,%%ebp		\n\
		call	.L__sysenter		\n\
		movl	%%edi,%%ebx		\n\
		popl	%%ebp" \
	: "=a"(ret) \
	: "a"(n),"m"(a1) \
	: "ecx","edx","edi"); \
	((unsigned int)ret<=0xfffff000) ? ret : \
		(__set_errno(-ret), ret=-1); })

#define sysenter_2(n,a1) ({ \
	int ret; \
	asm volatile ("\
		movl	%%ebx,%%edi		\n\
		movl	%2,%%ebx		\n\
		movl	4+%2,%%ecx		\n\
		pushl	%%ebp			\n\
		movl	%%esp,%%ebp		\n\
		call	.L__sysenter		\n\
		movl	%%edi,%%ebx		\n\
		popl	%%ebp" \
	: "=a"(ret) \
	: "a"(n),"m"(a1) \
	: "ecx","edx","edi"); \
	((unsigned int)ret<=0xfffff000) ? ret : \
		(__set_errno(-ret), ret=-1); })

#define sysenter_3(n,a1) ({ \
	int ret; \
	asm volatile ("\
		movl	%%ebx,%%edi		\n\
		movl	%2,%%ebx		\n\
		movl	4+%2,%%ecx		\n\
		movl	8+%2,%%edx		\n\
		pushl	%%ebp			\n\
		movl	%%esp,%%ebp		\n\
		call	.L__sysenter		\n\
		movl	%%edi,%%ebx		\n\
		popl	%%ebp" \
	: "=a"(ret) \
	: "a"(n),"m"(a1) \
	: "ecx","edx","edi"); \
	((unsigned int)ret<=0xfffff000) ? ret : \
		(__set_errno(-ret), ret=-1); })

#define sysenter_4(n,a1) ({ \
	int ret; \
	asm volatile ("\
		movl	%%ebx,%%edi		\n\
		movl	%2,%%ebx		\n\
		movl	4+%2,%%ecx		\n\
		movl	8+%2,%%edx		\n\
		movl	12+%2,%%esi		\n\
		pushl	%%ebp			\n\
		movl	%%esp,%%ebp		\n\
		call	.L__sysenter		\n\
		movl	%%edi,%%ebx		\n\
		popl	%%ebp" \
	: "=a"(ret) \
	: "a"(n),"m"(a1) \
	: "ebx","ecx","edx","esi","edi"); \
	((unsigned int)ret<=0xfffff000) ? ret : \
		(__set_errno(-ret), ret=-1); })

#define sysenter_5(n,a1) ({ \
	int ret; \
	asm volatile ("\
		pushl	%%ebx			\n\
		movl	%2,%%ebx		\n\
		movl	4+%2,%%ecx		\n\
		movl	8+%2,%%edx		\n\
		movl	12+%2,%%esi		\n\
		movl	16+%2,%%edi		\n\
		pushl	%%ebp			\n\
		movl	%%esp,%%ebp		\n\
		call	.L__sysenter		\n\
		popl	%%ebp			\n\
		popl	%%ebx" \
	: "=a"(ret) \
	: "a"(n),"m"(a1) \
	: "ebx","ecx","edx","esi","edi"); \
	((unsigned int)ret<=0xfffff000) ? ret : \
		(__set_errno(-ret), ret=-1); })

#define sysenter_6(n,a1) ({ \
	int ret; \
	asm volatile ("\
		pushl	%%ebx			\n\
		movl	%2,%%ebx		\n\
		movl	4+%2,%%ecx		\n\
		movl	8+%2,%%edx		\n\
		movl	12+%2,%%esi		\n\
		movl	16+%2,%%edi		\n\
		movl	20+%2,%%ebp		\n\
		pushl	%%ebp			\n\
		movl	%%esp,%%ebp		\n\
		call	.L__sysenter		\n\
		popl	%%ebp			\n\
		popl	%%ebx" \
	: "=a"(ret) \
	: "a"(n),"m"(a1) \
	: "ebx","ecx","edx","esi","edi","ebp"); \
	((unsigned int)ret<=0xfffff000) ? ret : \
		(__set_errno(-ret), ret=-1); })

int write () __attribute__ ((weak,alias ("__write")));
int __write (int fd, ...)
{ return sysenter_3 (SYS_write,fd); }

int read () __attribute__ ((weak,alias ("__read")));
int __read (int fd, ...)
{ return sysenter_3 (SYS_read,fd); }

int fcntl () __attribute__ ((weak,alias ("__fcntl")));
int __fcntl (int fd, ...)
{ return sysenter_3 (SYS_fcntl,fd); }

int ioctl () __attribute__ ((weak,alias ("__ioctl")));
int __ioctl (int fd, ...)
{ return sysenter_4 (SYS_ioctl,fd); }

int select () __attribute__ ((weak,alias ("__select")));
int __select (int n, ...)
{ return sysenter_5 (SYS__newselect,n); }

#endif
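/*
 * Usage sketch (my addition): save this file as sysenter.c and simply
 * run it, e.g. "sh sysenter.c". The embedded script compiles and
 * inserts the module, builds sysenter.so, and then times
 * "dd if=/dev/zero of=/dev/null count=2500000" twice, the second run
 * under "LD_PRELOAD=./sysenter.so" so that its read()/write() calls
 * take the SYSENTER path.
 */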