Browse Source

unify usage of inline assembly

pr/gpio
Kaspar Schleiser 7 years ago
parent
commit
c3f7186d4e
  1. 2
      boards/mulle/board.c
  2. 2
      boards/qemu-i386/x86_board_lpm.c
  3. 2
      boards/telosb/board.c
  4. 14
      boards/x86-multiboot-common/startup.c
  5. 6
      cpu/arm7_common/VIC.c
  6. 4
      cpu/arm7_common/arm_cpu.c
  7. 4
      cpu/atmega2560/startup.c
  8. 4
      cpu/atmega_common/irq_arch.c
  9. 8
      cpu/atmega_common/thread_arch.c
  10. 4
      cpu/cc430/cc430-gpioint.c
  11. 2
      cpu/cortexm_common/panic.c
  12. 4
      cpu/cortexm_common/thread_arch.c
  13. 10
      cpu/cortexm_common/vectors_cortexm.c
  14. 2
      cpu/ezr32wg/vectors.c
  15. 2
      cpu/k60/cpu.c
  16. 2
      cpu/k60/ssp.c
  17. 4
      cpu/x86/include/cpu.h
  18. 4
      cpu/x86/include/x86_interrupts.h
  19. 26
      cpu/x86/include/x86_ports.h
  20. 20
      cpu/x86/include/x86_registers.h
  21. 2
      cpu/x86/x86_atomic.c
  22. 78
      cpu/x86/x86_interrupts.c
  23. 22
      cpu/x86/x86_memory.c
  24. 8
      cpu/x86/x86_reboot.c
  25. 2
      cpu/x86/x86_rtc.c
  26. 14
      cpu/x86/x86_threading.c
  27. 4
      cpu/x86/x86_uart.c
  28. 110
      cpu/x86/x86_ucontext.c
  29. 2
      tests/fault_handler/main.c
  30. 2
      tests/leds/main.c

2
boards/mulle/board.c

@ -102,7 +102,7 @@ void board_init(void)
* (If the clock is not stable all UART output is garbled until it has
* stabilized) */
for (int i = 0; i < 100000; ++i) {
asm volatile("nop\n");
__asm__ volatile("nop\n");
}
/* Update SystemCoreClock global var */

2
boards/qemu-i386/x86_board_lpm.c

@ -32,7 +32,7 @@ enum lpm_mode lpm_set(enum lpm_mode target)
if (target == LPM_POWERDOWN) {
x86_shutdown();
}
asm volatile ("hlt");
__asm__ volatile ("hlt");
}
return LPM_UNKNOWN;
}

2
boards/telosb/board.c

@ -67,7 +67,7 @@ void msp430_init_dco(void)
BCSCTL1 |= DIVA1 + DIVA0; /* ACLK = LFXT1CLK/8 */
for (i = 0xFFFF; i > 0; i--) { /* Delay for XTAL to settle */
asm("nop");
__asm__("nop");
}
CCTL2 = CCIS0 + CM0 + CAP; /* Define CCR2, CAP, ACLK */

14
boards/x86-multiboot-common/startup.c

@ -62,12 +62,12 @@ const multiboot_header_t multiboot_header = {
void __attribute__((noreturn, optimize("Os", "omit-frame-pointer"), no_instrument_function)) _start(void)
{
asm volatile ("xor %ebp, %ebp");
asm volatile ("push %ebp");
asm volatile ("push %ebx");
asm volatile ("push %eax");
asm volatile ("push %ebp");
asm volatile ("jmp *%0" :: "r"(&startup));
__asm__ volatile ("xor %ebp, %ebp");
__asm__ volatile ("push %ebp");
__asm__ volatile ("push %ebx");
__asm__ volatile ("push %eax");
__asm__ volatile ("push %ebp");
__asm__ volatile ("jmp *%0" :: "r"(&startup));
__builtin_unreachable();
}
@ -112,7 +112,7 @@ static void have_a_break(void)
{
volatile bool cnt = false;
while (!cnt) {
asm volatile ("pause");
__asm__ volatile ("pause");
}
}

6
cpu/arm7_common/VIC.c

@ -14,20 +14,20 @@
static inline unsigned __get_cpsr(void)
{
unsigned long retval;
asm volatile(" mrs %0, cpsr" : "=r"(retval) : /* no inputs */);
__asm__ volatile(" mrs %0, cpsr" : "=r"(retval) : /* no inputs */);
return retval;
}
int irq_is_in(void)
{
int retval;
asm volatile(" mrs %0, cpsr" : "=r"(retval) : /* no inputs */);
__asm__ volatile(" mrs %0, cpsr" : "=r"(retval) : /* no inputs */);
return (retval & INTMode) == 18;
}
static inline void __set_cpsr(unsigned val)
{
asm volatile(" msr cpsr, %0" : /* no outputs */ : "r"(val));
__asm__ volatile(" msr cpsr, %0" : /* no outputs */ : "r"(val));
}
unsigned irq_disable(void)

4
cpu/arm7_common/arm_cpu.c

@ -25,7 +25,7 @@
void thread_yield_higher(void)
{
asm("svc 0\n");
__asm__("svc 0\n");
}
/*----------------------------------------------------------------------------
@ -72,7 +72,7 @@ char *thread_stack_init(thread_task_func_t task_func, void *arg, void *stack_sta
void thread_print_stack(void)
{
register void *stack = 0;
asm("mov %0, sp" : "=r"(stack));
__asm__("mov %0, sp" : "=r"(stack));
register unsigned int *s = (unsigned int *)stack;
printf("task: %X SP: %X\n", (unsigned int) sched_active_thread, (unsigned int) stack);

4
cpu/atmega2560/startup.c

@ -49,12 +49,12 @@ void init8_ovr(void) __attribute__((naked)) __attribute__((section(".init8")));
void init7_ovr(void)
{
asm("call reset_handler");
__asm__("call reset_handler");
}
void init8_ovr(void)
{
asm("jmp exit");
__asm__("jmp exit");
}
/**
* @brief This function is the entry point after a system reset

4
cpu/atmega_common/irq_arch.c

@ -35,7 +35,7 @@ volatile uint8_t __in_isr = 0;
__attribute__((always_inline)) static inline uint8_t __get_interrupt_state(void)
{
uint8_t sreg;
asm volatile("in r0, __SREG__; \n\t"
__asm__ volatile("in r0, __SREG__; \n\t"
"mov %0, r0 \n\t"
: "=g"(sreg)
:
@ -45,7 +45,7 @@ __attribute__((always_inline)) static inline uint8_t __get_interrupt_state(void
__attribute__((always_inline)) inline void __set_interrupt_state(uint8_t state)
{
asm volatile("mov r15,%0; \n\t"
__asm__ volatile("mov r15,%0; \n\t"
"in r16, __SREG__; \n\t"
"cbr r16,7; \n\t"
"or r15,r16; \n\t"

8
cpu/atmega_common/thread_arch.c

@ -214,7 +214,7 @@ void NORETURN __enter_thread_mode(void)
{
irq_enable();
__context_restore();
asm volatile("ret");
__asm__ volatile("ret");
UNREACHABLE();
}
@ -229,13 +229,13 @@ void thread_arch_yield(void)
irq_enable();
__context_restore();
asm volatile("ret");
__asm__ volatile("ret");
}
__attribute__((always_inline)) static inline void __context_save(void)
{
asm volatile(
__asm__ volatile(
"push r0 \n\t"
"in r0, __SREG__ \n\t"
"cli \n\t"
@ -291,7 +291,7 @@ __attribute__((always_inline)) static inline void __context_save(void)
__attribute__((always_inline)) static inline void __context_restore(void)
{
asm volatile(
__asm__ volatile(
"lds r26, sched_active_thread \n\t"
"lds r27, sched_active_thread + 1 \n\t"
"ld r28, x+ \n\t"

4
cpu/cc430/cc430-gpioint.c

@ -174,7 +174,7 @@ interrupt(PORT1_VECTOR) __attribute__((naked)) port1_isr(void)
}
else {
/* TODO: check for long duration irq */
asm volatile(" nop ");
__asm__ volatile(" nop ");
}
}
else {
@ -221,7 +221,7 @@ interrupt(PORT2_VECTOR) __attribute__((naked)) port2_isr(void)
else {
c2++;
/* TODO: check for long duration irq */
asm volatile(" nop ");
__asm__ volatile(" nop ");
}
}
else {

2
cpu/cortexm_common/panic.c

@ -25,7 +25,7 @@ void panic_arch(void)
{
#ifdef DEVELHELP
/* The bkpt instruction will signal to the debugger to break here. */
__ASM("bkpt #0");
__asm__("bkpt #0");
/* enter infinite loop, into deepest possible sleep mode */
while (1) {
lpm_set(LPM_OFF);

4
cpu/cortexm_common/thread_arch.c

@ -253,7 +253,7 @@ void thread_arch_stack_print(void)
__attribute__((naked)) void NORETURN thread_arch_start_threading(void)
{
__ASM volatile (
__asm__ volatile (
"bl irq_arch_enable \n" /* enable IRQs so that the SVC
* interrupt is reachable */
"svc #1 \n" /* trigger the SVC interrupt */
@ -271,7 +271,7 @@ void thread_arch_yield(void)
__attribute__((naked)) void arch_context_switch(void)
{
__ASM volatile (
__asm__ volatile (
/* PendSV handler entry point */
".global isr_pendsv \n"
".thumb_func \n"

10
cpu/cortexm_common/vectors_cortexm.c

@ -34,7 +34,7 @@
/**
* @brief Interrupt stack canary value
*
* @note 0xe7fe is the ARM Thumb machine code equivalent of asm("bl #-2\n") or
* @note 0xe7fe is the ARM Thumb machine code equivalent of __asm__("bl #-2\n") or
* 'while (1);', i.e. an infinite loop.
*/
#define STACK_CANARY_WORD 0xE7FEE7FEu
@ -86,7 +86,7 @@ void reset_handler_default(void)
uint32_t *top;
/* Fill stack space with canary values up until the current stack pointer */
/* Read current stack pointer from CPU register */
asm volatile ("mov %[top], sp" : [top] "=r" (top) : : );
__asm__ volatile ("mov %[top], sp" : [top] "=r" (top) : : );
dst = &_sstack;
while (dst < top) {
*(dst++) = STACK_CANARY_WORD;
@ -137,7 +137,7 @@ void nmi_default(void)
static inline int _stack_size_left(uint32_t required)
{
uint32_t* sp;
asm volatile ("mov %[sp], sp" : [sp] "=r" (sp) : : );
__asm__ volatile ("mov %[sp], sp" : [sp] "=r" (sp) : : );
return ((int)((uint32_t)sp - (uint32_t)&_sstack) - required);
}
@ -147,7 +147,7 @@ void hard_fault_handler(uint32_t* sp, uint32_t corrupted, uint32_t exc_return, u
__attribute__((naked)) void hard_fault_default(void)
{
/* Get stack pointer where exception stack frame lies */
__ASM volatile
__asm__ volatile
(
/* Check that msp is valid first because we want to stack all the
* r4-r11 registers so that we can use r0, r1, r2, r3 for other things. */
@ -275,7 +275,7 @@ __attribute__((used)) void hard_fault_handler(uint32_t* sp, uint32_t corrupted,
if(stack_left < 0) {
printf("\nISR stack overflowed by at least %d bytes.\n", (-1 * stack_left));
}
__ASM volatile (
__asm__ volatile (
"mov r0, %[sp]\n"
"ldr r2, [r0, #8]\n"
"ldr r3, [r0, #12]\n"

2
cpu/ezr32wg/vectors.c

@ -27,7 +27,7 @@ extern uint32_t _estack;
/* define a local dummy handler as it needs to be in the same compilation unit
* as the alias definition */
void dummy_handler(void) {
asm("b dummy_handler_default");
__asm__("b dummy_handler_default");
}
/* Cortex-M common interrupt vectors */

2
cpu/k60/cpu.c

@ -64,7 +64,7 @@ static void check_running_cpu_revision(void)
* between silicon revision 1.x and 2.x (LSB of CPUID) */
/* If you unexpectedly end up on this line when debugging:
* Rebuild the code using the correct value for K60_CPU_REV */
__ASM volatile ("bkpt #99\n");
__asm__ volatile ("bkpt #99\n");
while (1);
}

2
cpu/k60/ssp.c

@ -46,7 +46,7 @@ void __attribute__((section(".preinit_array")))(*preinit__stack_chk_guard_setup[
*/
void __attribute__((noreturn)) __stack_chk_fail(void)
{
asm volatile ("bkpt #1");
__asm__ volatile ("bkpt #1");
while (1);
}

4
cpu/x86/include/cpu.h

@ -59,8 +59,8 @@ extern "C" {
static inline void __attribute__((always_inline, noreturn)) x86_hlt(void)
{
while (1) {
asm volatile ("cli");
asm volatile ("hlt");
__asm__ volatile ("cli");
__asm__ volatile ("hlt");
}
}

4
cpu/x86/include/x86_interrupts.h

@ -139,7 +139,7 @@ void x86_interrupt_handler_set(unsigned num, x86_intr_handler_t handler);
static inline unsigned long __attribute__((always_inline)) x86_pushf_cli(void)
{
unsigned long result;
asm volatile("pushf; cli; pop %0" : "=g"(result));
__asm__ volatile("pushf; cli; pop %0" : "=g"(result));
return result;
}
@ -148,7 +148,7 @@ static inline unsigned long __attribute__((always_inline)) x86_pushf_cli(void)
*/
static inline void __attribute__((always_inline)) x86_restore_flags(unsigned long stored_value)
{
asm volatile("push %0; popf" :: "g"(stored_value));
__asm__ volatile("push %0; popf" :: "g"(stored_value));
}
/**

26
cpu/x86/include/x86_ports.h

@ -42,7 +42,7 @@ static inline uint8_t __attribute__((always_inline)) inb(uint16_t port)
{
/* See [IA32-v2a] "IN". */
uint8_t data;
asm volatile("inb %w1, %b0" : "=a"(data) : "Nd"(port));
__asm__ volatile("inb %w1, %b0" : "=a"(data) : "Nd"(port));
return data;
}
@ -55,7 +55,7 @@ static inline uint8_t __attribute__((always_inline)) inb(uint16_t port)
static inline void __attribute__((always_inline)) insb(uint16_t port, void *addr, size_t cnt)
{
/* See [IA32-v2a] "INS". */
asm volatile("rep insb" : "+D"(addr), "+c"(cnt) : "d"(port) : "memory");
__asm__ volatile("rep insb" : "+D"(addr), "+c"(cnt) : "d"(port) : "memory");
}
/**
@ -67,7 +67,7 @@ static inline uint16_t __attribute__((always_inline)) inw(uint16_t port)
{
uint16_t data;
/* See [IA32-v2a] "IN". */
asm volatile("inw %w1, %w0" : "=a"(data) : "Nd"(port));
__asm__ volatile("inw %w1, %w0" : "=a"(data) : "Nd"(port));
return data;
}
@ -80,7 +80,7 @@ static inline uint16_t __attribute__((always_inline)) inw(uint16_t port)
static inline void __attribute__((always_inline)) insw(uint16_t port, void *addr, size_t cnt)
{
/* See [IA32-v2a] "INS". */
asm volatile("rep insw" : "+D"(addr), "+c"(cnt) : "d"(port) : "memory");
__asm__ volatile("rep insw" : "+D"(addr), "+c"(cnt) : "d"(port) : "memory");
}
/**
@ -92,7 +92,7 @@ static inline uint32_t __attribute__((always_inline)) inl(uint16_t port)
{
/* See [IA32-v2a] "IN". */
uint32_t data;
asm volatile("inl %w1, %0" : "=a"(data) : "Nd"(port));
__asm__ volatile("inl %w1, %0" : "=a"(data) : "Nd"(port));
return data;
}
@ -105,7 +105,7 @@ static inline uint32_t __attribute__((always_inline)) inl(uint16_t port)
static inline void __attribute__((always_inline)) insl(uint16_t port, void *addr, size_t cnt)
{
/* See [IA32-v2a] "INS". */
asm volatile("rep insl" : "+D"(addr), "+c"(cnt) : "d"(port) : "memory");
__asm__ volatile("rep insl" : "+D"(addr), "+c"(cnt) : "d"(port) : "memory");
}
/**
@ -116,7 +116,7 @@ static inline void __attribute__((always_inline)) insl(uint16_t port, void *addr
static inline void __attribute__((always_inline)) outb(uint16_t port, uint8_t data)
{
/* See [IA32-v2b] "OUT". */
asm volatile("outb %b0, %w1" : : "a"(data), "Nd"(port));
__asm__ volatile("outb %b0, %w1" : : "a"(data), "Nd"(port));
}
/**
@ -128,7 +128,7 @@ static inline void __attribute__((always_inline)) outb(uint16_t port, uint8_t da
static inline void __attribute__((always_inline)) outsb(uint16_t port, const void *addr, size_t cnt)
{
/* See [IA32-v2b] "OUTS". */
asm volatile("rep outsb" : "+S"(addr), "+c"(cnt) : "d"(port));
__asm__ volatile("rep outsb" : "+S"(addr), "+c"(cnt) : "d"(port));
}
/**
@ -139,7 +139,7 @@ static inline void __attribute__((always_inline)) outsb(uint16_t port, const voi
static inline void __attribute__((always_inline)) outw(uint16_t port, uint16_t data)
{
/* See [IA32-v2b] "OUT". */
asm volatile("outw %w0, %w1" : : "a"(data), "Nd"(port));
__asm__ volatile("outw %w0, %w1" : : "a"(data), "Nd"(port));
}
/**
@ -151,7 +151,7 @@ static inline void __attribute__((always_inline)) outw(uint16_t port, uint16_t d
static inline void __attribute__((always_inline)) outsw(uint16_t port, const void *addr, size_t cnt)
{
/* See [IA32-v2b] "OUTS". */
asm volatile("rep outsw" : "+S"(addr), "+c"(cnt) : "d"(port));
__asm__ volatile("rep outsw" : "+S"(addr), "+c"(cnt) : "d"(port));
}
/**
@ -162,7 +162,7 @@ static inline void __attribute__((always_inline)) outsw(uint16_t port, const voi
static inline void __attribute__((always_inline)) outl(uint16_t port, uint32_t data)
{
/* See [IA32-v2b] "OUT". */
asm volatile("outl %0, %w1" : : "a"(data), "Nd"(port));
__asm__ volatile("outl %0, %w1" : : "a"(data), "Nd"(port));
}
/**
@ -174,7 +174,7 @@ static inline void __attribute__((always_inline)) outl(uint16_t port, uint32_t d
static inline void __attribute__((always_inline)) outsl(uint16_t port, const void *addr, size_t cnt)
{
/* See [IA32-v2b] "OUTS". */
asm volatile("rep outsl" : "+S"(addr), "+c"(cnt) : "d"(port));
__asm__ volatile("rep outsl" : "+S"(addr), "+c"(cnt) : "d"(port));
}
/**
@ -182,7 +182,7 @@ static inline void __attribute__((always_inline)) outsl(uint16_t port, const voi
*/
static inline void __attribute__((always_inline)) io_wait(void)
{
asm volatile(" jmp 1f\n"
__asm__ volatile(" jmp 1f\n"
"1: jmp 2f\n"
"2:");
}

20
cpu/x86/include/x86_registers.h

@ -74,7 +74,7 @@ extern "C" {
static inline uint32_t X86_CR_ATTR cr0_read(void)
{
uint32_t result;
asm volatile ("mov %%cr0, %%eax" : "=a"(result));
__asm__ volatile ("mov %%cr0, %%eax" : "=a"(result));
return result;
}
@ -87,7 +87,7 @@ static inline uint32_t X86_CR_ATTR cr0_read(void)
*/
static inline void X86_CR_ATTR cr0_write(uint32_t value)
{
asm volatile ("mov %%eax, %%cr0" :: "a"(value));
__asm__ volatile ("mov %%eax, %%cr0" :: "a"(value));
}
/**
@ -99,7 +99,7 @@ static inline void X86_CR_ATTR cr0_write(uint32_t value)
static inline uint32_t X86_CR_ATTR cr2_read(void)
{
uint32_t result;
asm volatile ("mov %%cr2, %%eax" : "=a"(result));
__asm__ volatile ("mov %%cr2, %%eax" : "=a"(result));
return result;
}
@ -111,7 +111,7 @@ static inline uint32_t X86_CR_ATTR cr2_read(void)
static inline uint32_t X86_CR_ATTR cr3_read(void)
{
uint32_t result;
asm volatile ("mov %%cr3, %%eax" : "=a"(result));
__asm__ volatile ("mov %%cr3, %%eax" : "=a"(result));
return result;
}
@ -122,7 +122,7 @@ static inline uint32_t X86_CR_ATTR cr3_read(void)
*/
static inline void X86_CR_ATTR cr3_write(uint32_t value)
{
asm volatile ("mov %%eax, %%cr3" :: "a"(value));
__asm__ volatile ("mov %%eax, %%cr3" :: "a"(value));
}
/**
@ -131,7 +131,7 @@ static inline void X86_CR_ATTR cr3_write(uint32_t value)
static inline uint32_t X86_CR_ATTR cr4_read(void)
{
uint32_t result;
asm volatile ("mov %%cr4, %%eax" : "=a"(result));
__asm__ volatile ("mov %%cr4, %%eax" : "=a"(result));
return result;
}
@ -144,7 +144,7 @@ static inline uint32_t X86_CR_ATTR cr4_read(void)
*/
static inline void X86_CR_ATTR cr4_write(uint32_t value)
{
asm volatile ("mov %%eax, %%cr4" :: "a"(value));
__asm__ volatile ("mov %%eax, %%cr4" :: "a"(value));
}
#define EFER_SCE (1u << 0)
@ -163,7 +163,7 @@ static inline void X86_CR_ATTR cr4_write(uint32_t value)
static inline uint64_t X86_CR_ATTR msr_read(uint32_t msr)
{
uint32_t eax, edx;
asm volatile (
__asm__ volatile (
"rdmsr"
: "=a"(eax), "=d"(edx)
: "c"(msr)
@ -180,7 +180,7 @@ static inline uint64_t X86_CR_ATTR msr_read(uint32_t msr)
*/
static inline void X86_CR_ATTR msr_set(uint32_t msr, uint64_t value)
{
asm volatile (
__asm__ volatile (
"wrmsr"
:: "a"((uint32_t) value), "d"((uint32_t) (value >> 32)), "c"(msr)
);
@ -250,7 +250,7 @@ static inline void X86_CR_ATTR msr_set(uint32_t msr, uint64_t value)
static inline uint64_t X86_CR_ATTR cpuid_caps(void)
{
uint32_t edx, ecx;
asm volatile ("cpuid" : "=d"(edx), "=c"(ecx) : "a"(1) : "ebx");
__asm__ volatile ("cpuid" : "=d"(edx), "=c"(ecx) : "a"(1) : "ebx");
return ((uint64_t) ecx << 32) | edx;
}

2
cpu/x86/x86_atomic.c

@ -34,7 +34,7 @@
int atomic_cas(atomic_int_t *dest, int known_value, int new_value)
{
uint8_t successful;
asm volatile ("lock cmpxchgl %2, %0\n"
__asm__ volatile ("lock cmpxchgl %2, %0\n"
"seteb %1"
: "+m"(ATOMIC_VALUE(*dest)), "=g"(successful)
: "r"(new_value), "a"(known_value)

78
cpu/x86/x86_interrupts.c

@ -158,7 +158,7 @@ static void continue_after_intr(void)
{
ucontext_t *ctx = (ucontext_t *) sched_active_thread->sp;
x86_interrupted_ctx = ctx->uc_context.registers;
asm volatile (
__asm__ volatile (
"push %0\n" /* flags */
"push $0x0008\n" /* cs */
"push %1\n" /* ip */
@ -204,7 +204,7 @@ void x86_int_handler(void)
ctx->uc_context.registers = x86_interrupted_ctx;
ctx->uc_stack.ss_sp = x86_interrupt_handler_stack;
ctx->uc_stack.ss_size = sizeof x86_interrupt_handler_stack;
asm volatile ("pushf; pop %0" : "=g"(ctx->uc_context.flags));
__asm__ volatile ("pushf; pop %0" : "=g"(ctx->uc_context.flags));
ctx->uc_context.ip = (void *) (uintptr_t) &continue_after_intr;
ctx->__intr.ip = sp[0];
ctx->__intr.flags = sp[2];
@ -215,56 +215,56 @@ void x86_int_handler(void)
void ASM_FUN_ATTRIBUTES NORETURN x86_int_entry(void)
{
asm volatile ("mov %eax, (4*0 + x86_interrupted_ctx)");
asm volatile ("mov %ecx, (4*1 + x86_interrupted_ctx)");
asm volatile ("mov %edx, (4*2 + x86_interrupted_ctx)");
asm volatile ("mov %ebx, (4*3 + x86_interrupted_ctx)");
asm volatile ("mov %ebp, (4*5 + x86_interrupted_ctx)");
asm volatile ("mov %esi, (4*6 + x86_interrupted_ctx)");
asm volatile ("mov %edi, (4*7 + x86_interrupted_ctx)");
asm volatile ("jnc 1f");
asm volatile (" mov (%esp), %eax");
asm volatile (" add $4, %esp");
asm volatile (" jmp 2f");
asm volatile ("1:");
asm volatile (" xor %eax, %eax");
asm volatile ("2:");
asm volatile (" mov %eax, x86_current_interrupt_error_code");
asm volatile ("mov %esp, (4*4 + x86_interrupted_ctx)");
asm volatile ("mov %0, %%esp" :: "g"(&x86_interrupt_handler_stack[sizeof x86_interrupt_handler_stack]));
asm volatile ("call x86_int_handler");
asm volatile ("jmp x86_int_exit");
__asm__ volatile ("mov %eax, (4*0 + x86_interrupted_ctx)");
__asm__ volatile ("mov %ecx, (4*1 + x86_interrupted_ctx)");
__asm__ volatile ("mov %edx, (4*2 + x86_interrupted_ctx)");
__asm__ volatile ("mov %ebx, (4*3 + x86_interrupted_ctx)");
__asm__ volatile ("mov %ebp, (4*5 + x86_interrupted_ctx)");
__asm__ volatile ("mov %esi, (4*6 + x86_interrupted_ctx)");
__asm__ volatile ("mov %edi, (4*7 + x86_interrupted_ctx)");
__asm__ volatile ("jnc 1f");
__asm__ volatile (" mov (%esp), %eax");
__asm__ volatile (" add $4, %esp");
__asm__ volatile (" jmp 2f");
__asm__ volatile ("1:");
__asm__ volatile (" xor %eax, %eax");
__asm__ volatile ("2:");
__asm__ volatile (" mov %eax, x86_current_interrupt_error_code");
__asm__ volatile ("mov %esp, (4*4 + x86_interrupted_ctx)");
__asm__ volatile ("mov %0, %%esp" :: "g"(&x86_interrupt_handler_stack[sizeof x86_interrupt_handler_stack]));
__asm__ volatile ("call x86_int_handler");
__asm__ volatile ("jmp x86_int_exit");
__builtin_unreachable();
}
void ASM_FUN_ATTRIBUTES NORETURN x86_int_exit(void)
{
asm volatile ("mov (4*0 + x86_interrupted_ctx), %eax");
asm volatile ("mov (4*1 + x86_interrupted_ctx), %ecx");
asm volatile ("mov (4*2 + x86_interrupted_ctx), %edx");
asm volatile ("mov (4*3 + x86_interrupted_ctx), %ebx");
asm volatile ("mov (4*5 + x86_interrupted_ctx), %ebp");
asm volatile ("mov (4*6 + x86_interrupted_ctx), %esi");
asm volatile ("mov (4*7 + x86_interrupted_ctx), %edi");
asm volatile ("mov (4*4 + x86_interrupted_ctx), %esp");
asm volatile ("iret");
__asm__ volatile ("mov (4*0 + x86_interrupted_ctx), %eax");
__asm__ volatile ("mov (4*1 + x86_interrupted_ctx), %ecx");
__asm__ volatile ("mov (4*2 + x86_interrupted_ctx), %edx");
__asm__ volatile ("mov (4*3 + x86_interrupted_ctx), %ebx");
__asm__ volatile ("mov (4*5 + x86_interrupted_ctx), %ebp");
__asm__ volatile ("mov (4*6 + x86_interrupted_ctx), %esi");
__asm__ volatile ("mov (4*7 + x86_interrupted_ctx), %edi");
__asm__ volatile ("mov (4*4 + x86_interrupted_ctx), %esp");
__asm__ volatile ("iret");
__builtin_unreachable();
}
#define DECLARE_INT(NUM, HAS_ERROR_CODE, MNEMONIC) \
static void ASM_FUN_ATTRIBUTES NORETURN x86_int_entry_##NUM##h(void) \
{ \
asm volatile ("movb %0, x86_current_interrupt" :: "n"(0x##NUM)); \
__asm__ volatile ("movb %0, x86_current_interrupt" :: "n"(0x##NUM)); \
if ((HAS_ERROR_CODE)) { \
asm volatile ("stc"); \
__asm__ volatile ("stc"); \
} \
else { \
asm volatile ("clc"); \
__asm__ volatile ("clc"); \
}\
asm volatile ("jmp x86_int_entry"); \
__asm__ volatile ("jmp x86_int_entry"); \
__builtin_unreachable(); \
}
DECLARE_INT(00, 0, "#DE")
@ -342,7 +342,7 @@ static void test_int_bp(void)
unsigned long si;
unsigned long di;
unsigned long eflags_before, eflags_after;
asm volatile (
__asm__ volatile (
"mov %8, %%esi\n"
"mov %9, %%edi\n"
"pushf; pop %6\n"
@ -416,7 +416,7 @@ static void load_interrupt_descriptor_table(void)
SET_IDT_DESC(2e, 0, "PIC ATA1", 0)
SET_IDT_DESC(2f, 0, "PIC ATA2", 0)
asm volatile ("lidt %0" :: "m"(idtr));
__asm__ volatile ("lidt %0" :: "m"(idtr));
}
void x86_init_interrupts(void)

22
cpu/x86/x86_memory.c

@ -98,17 +98,17 @@ void x86_init_gdt(void)
.offset = (unsigned long) &gdt_entries[0],
};
asm volatile ("" :: "a"(0x0010));
__asm__ volatile ("" :: "a"(0x0010));
asm volatile ("lgdt %0" :: "m"(gdt));
asm volatile ("ljmp $0x0008, $1f\n"
__asm__ volatile ("lgdt %0" :: "m"(gdt));
__asm__ volatile ("ljmp $0x0008, $1f\n"
"1:");
asm volatile ("mov %ax, %ds");
asm volatile ("mov %ax, %es");
asm volatile ("mov %ax, %fs");
asm volatile ("mov %ax, %gs");
asm volatile ("mov %ax, %ss");
__asm__ volatile ("mov %ax, %ds");
__asm__ volatile ("mov %ax, %es");
__asm__ volatile ("mov %ax, %fs");
__asm__ volatile ("mov %ax, %gs");
__asm__ volatile ("mov %ax, %ss");
}
/* Addresses in PDPT, PD, and PT are linear addresses. */
@ -193,7 +193,7 @@ static void init_pagetable(void)
static void set_temp_page(uint64_t addr)
{
static_pts[TEMP_PAGE_PT][TEMP_PAGE_PTE] = addr != -1ull ? addr | PT_P | PT_RW | pt_xd : 0;
asm volatile ("invlpg (%0)" :: "r"(&TEMP_PAGE));
__asm__ volatile ("invlpg (%0)" :: "r"(&TEMP_PAGE));
}
static inline uint64_t min64(uint64_t a, uint64_t b)
@ -351,7 +351,7 @@ static void pagefault_handler(uint8_t intr_num, struct x86_pushad *orig_ctx, uns
else if ((pte != NO_PTE) && !(pte & PT_P) && (pte & PT_HEAP_BIT)) {
/* mark as present */
TEMP_PAGE.indices[(virtual_addr >> 12) % 512] |= PT_P;
asm volatile ("invlpg (%0)" :: "r"(virtual_addr));
__asm__ volatile ("invlpg (%0)" :: "r"(virtual_addr));
/* initialize for easier debugging */
uint32_t *p = (uint32_t *) (virtual_addr & ~0xfff);
@ -429,7 +429,7 @@ static void virtual_pages_set_bits(uint32_t virtual_addr, unsigned pages, uint64
uint64_t old_physical_addr = x86_get_pte(virtual_addr) & PT_ADDR_MASK;
TEMP_PAGE.indices[pte_i] = old_physical_addr | bits;
asm volatile ("invlpg (%0)" :: "r"(virtual_addr));
__asm__ volatile ("invlpg (%0)" :: "r"(virtual_addr));
virtual_addr += 0x1000;
}

8
cpu/x86/x86_reboot.c

@ -50,7 +50,7 @@ static const struct idtr_t EMPTY_IDT = {
void x86_load_empty_idt(void)
{
asm volatile ("lidt %0" :: "m"(EMPTY_IDT));
__asm__ volatile ("lidt %0" :: "m"(EMPTY_IDT));
}
static bool fail_violently;
@ -62,7 +62,7 @@ void NORETURN x86_kbc_reboot(void)
while (1) {
if (fail_violently) {
asm volatile ("int3"); /* Cause a triple fault. Won't return. */
__asm__ volatile ("int3"); /* Cause a triple fault. Won't return. */
}
fail_violently = true;
@ -79,7 +79,7 @@ void NORETURN x86_kbc_reboot(void)
}
}
asm volatile ("int3"); /* Cause a triple fault. Won't return. */
__asm__ volatile ("int3"); /* Cause a triple fault. Won't return. */
}
}
@ -88,7 +88,7 @@ static bool reboot_twice;
void reboot(void)
{
asm volatile ("cli");
__asm__ volatile ("cli");
if (!reboot_twice) {
reboot_twice = true;
if (reboot_fun) {

2
cpu/x86/x86_rtc.c

@ -164,7 +164,7 @@ bool x86_rtc_read(x86_rtc_data_t *dest)
unsigned old_status = irq_disable();
while (is_update_in_progress()) {
asm volatile ("pause");
__asm__ volatile ("pause");
}
uint8_t b = x86_cmos_read(RTC_REG_B);

14
cpu/x86/x86_threading.c

@ -70,17 +70,17 @@ unsigned irq_disable(void)
unsigned irq_enable(void)
{
unsigned long eflags;
asm volatile ("pushf; pop %0; sti" : "=g"(eflags));
__asm__ volatile ("pushf; pop %0; sti" : "=g"(eflags));
return (eflags & X86_IF) != 0;
}
void irq_restore(unsigned state)
{
if (state) {
asm volatile ("sti");
__asm__ volatile ("sti");
}
else {
asm volatile ("cli");
__asm__ volatile ("cli");
}
}
@ -184,7 +184,7 @@ static void fpu_used_interrupt(uint8_t intr_num, struct x86_pushad *orig_ctx, un
(void) orig_ctx;
(void) error_code;
asm volatile ("clts"); /* clear task switch flag */
__asm__ volatile ("clts"); /* clear task switch flag */
if (fpu_owner == sched_active_pid) {
return;
@ -192,13 +192,13 @@ static void fpu_used_interrupt(uint8_t intr_num, struct x86_pushad *orig_ctx, un
if (fpu_owner != KERNEL_PID_UNDEF) {
ucontext_t *ctx_owner = (ucontext_t *) sched_threads[fpu_owner]->sp;
asm volatile ("fxsave (%0)" :: "r"(&fpu_data));
__asm__ volatile ("fxsave (%0)" :: "r"(&fpu_data));
ctx_owner->__fxsave = fpu_data;
}
ucontext_t *ctx_active = (ucontext_t *) sched_active_thread->sp;
fpu_data = ctx_active->__fxsave;
asm volatile ("fxrstor (%0)" :: "r"(&fpu_data));
__asm__ volatile ("fxrstor (%0)" :: "r"(&fpu_data));
fpu_owner = sched_active_pid;
}
@ -220,7 +220,7 @@ void x86_init_threading(void)
makecontext(&end_context, x86_thread_exit, 0);
x86_interrupt_handler_set(X86_INT_NM, fpu_used_interrupt);
asm volatile ("fxsave (%0)" :: "r"(&initial_fpu_state));
__asm__ volatile ("fxsave (%0)" :: "r"(&initial_fpu_state));
DEBUG("Threading initialized\n");
}

4
cpu/x86/x86_uart.c

@ -72,7 +72,7 @@ ssize_t x86_uart_write(const char *buf, size_t len)
size_t written = 0;
while (written < len) {
while (!is_output_empty()) {
asm volatile ("pause");
__asm__ volatile ("pause");
}
outb(UART_PORT + THR, buf[written]);
++written;
@ -89,7 +89,7 @@ ssize_t x86_uart_read(char *buf, size_t len)
size_t read = 0;
while (read < len) {
while (!is_input_empty()) {
asm volatile ("pause");
__asm__ volatile ("pause");
}
buf[read] = inb(UART_PORT + RBR);
++read;

110
cpu/x86/x86_ucontext.c

@ -34,79 +34,79 @@
int __attribute__((optimize("omit-frame-pointer"), no_instrument_function)) getcontext(ucontext_t *ucp)
{
asm volatile ("pushf\n" :: "a"(ucp));
asm volatile ("pop 4*2(%eax)\n");
asm volatile ("mov %eax, 4*3(%eax)\n");
asm volatile ("mov %ecx, 4*4(%eax)\n");
asm volatile ("mov %edx, 4*5(%eax)\n");
asm volatile ("mov %ebx, 4*6(%eax)\n");
/* asm volatile ("mov %esp, 4*7(%eax)\n"); omitted */
asm volatile ("mov %ebp, 4*8(%eax)\n");
asm volatile ("mov %esi, 4*9(%eax)\n");
asm volatile ("mov %edi, 4*10(%eax)\n");
asm volatile ("lea 4(%esp), %edx\n");
asm volatile ("mov %edx, 4*0(%eax)\n");
asm volatile ("xor %edx, %edx\n");
asm volatile ("mov %edx, 4*1(%eax)\n");
asm volatile ("mov (%esp), %edx\n");
asm volatile ("mov %edx, 4*11(%eax)\n");
__asm__ volatile ("pushf\n" :: "a"(ucp));
__asm__ volatile ("pop 4*2(%eax)\n");
__asm__ volatile ("mov %eax, 4*3(%eax)\n");
__asm__ volatile ("mov %ecx, 4*4(%eax)\n");
__asm__ volatile ("mov %edx, 4*5(%eax)\n");
__asm__ volatile ("mov %ebx, 4*6(%eax)\n");
/* __asm__ volatile ("mov %esp, 4*7(%eax)\n"); omitted */
__asm__ volatile ("mov %ebp, 4*8(%eax)\n");
__asm__ volatile ("mov %esi, 4*9(%eax)\n");
__asm__ volatile ("mov %edi, 4*10(%eax)\n");
__asm__ volatile ("lea 4(%esp), %edx\n");
__asm__ volatile ("mov %edx, 4*0(%eax)\n");
__asm__ volatile ("xor %edx, %edx\n");
__asm__ volatile ("mov %edx, 4*1(%eax)\n");
__asm__ volatile ("mov (%esp), %edx\n");
__asm__ volatile ("mov %edx, 4*11(%eax)\n");
return 0;
}
int __attribute__((optimize("omit-frame-pointer"), no_instrument_function)) setcontext(const ucontext_t *ucp)
{
asm volatile ("1:\n" :: "a"(ucp));
__asm__ volatile ("1:\n" :: "a"(ucp));
/* asm volatile ("mov 4*3(%eax), %eax\n");, omitted */
asm volatile ("mov 4*4(%eax), %ecx\n");
/* asm volatile ("mov 4*5(%eax), %edx\n");, omitted */
asm volatile ("mov 4*6(%eax), %ebx\n");
/* asm volatile ("mov 4*7(%eax), %esp\n");, omitted */
asm volatile ("mov 4*8(%eax), %ebp\n");
asm volatile ("mov 4*9(%eax), %esi\n");
asm volatile ("mov 4*10(%eax), %edi\n");
/* __asm__ volatile ("mov 4*3(%eax), %eax\n");, omitted */
__asm__ volatile ("mov 4*4(%eax), %ecx\n");
/* __asm__ volatile ("mov 4*5(%eax), %edx\n");, omitted */
__asm__ volatile ("mov 4*6(%eax), %ebx\n");
/* __asm__ volatile ("mov 4*7(%eax), %esp\n");, omitted */
__asm__ volatile ("mov 4*8(%eax), %ebp\n");
__asm__ volatile ("mov 4*9(%eax), %esi\n");
__asm__ volatile ("mov 4*10(%eax), %edi\n");
asm volatile ("mov 4*0(%eax), %esp\n");
asm volatile ("add 4*1(%eax), %esp\n");
__asm__ volatile ("mov 4*0(%eax), %esp\n");
__asm__ volatile ("add 4*1(%eax), %esp\n");
asm volatile ("mov 4*11(%eax), %edx\n");
asm volatile ("mov %eax, %ebx\n");
__asm__ volatile ("mov 4*11(%eax), %edx\n");
__asm__ volatile ("mov %eax, %ebx\n");
asm volatile ("push 4*2(%eax)\n");
asm volatile ("popf\n");
__asm__ volatile ("push 4*2(%eax)\n");
__asm__ volatile ("popf\n");
asm volatile ("call *%edx\n");
__asm__ volatile ("call *%edx\n");
asm volatile ("mov 4*12(%ebx), %eax\n");
asm volatile ("jmp 1b\n");
__asm__ volatile ("mov 4*12(%ebx), %eax\n");
__asm__ volatile ("jmp 1b\n");
__builtin_unreachable();
}
static void __attribute__((optimize("omit-frame-pointer"), noreturn, no_instrument_function)) makecontext_entrypoint(void)
{
/* ebx = ucp, ecx = argc, ebp = arg[0], esi = arg[1], edi = arg[2] */
asm volatile ("mov 4*3(%ebx), %eax\n"); /* eax = func */
asm volatile ("jecxz 0f\n");
asm volatile ("cmpb $1, %cl; je 1f\n");
asm volatile ("cmpb $2, %cl; je 2f\n");
asm volatile ("cmpb $3, %cl; je 3f\n");
asm volatile ("cmpb $4, %cl; je 4f\n");
asm volatile (" mov 4*7(%ebx), %edx; push %edx\n");
asm volatile ("4: mov 4*5(%ebx), %edx; push %edx\n");
asm volatile ("3: push %edi\n");
asm volatile ("2: push %esi\n");
asm volatile ("1: push %ebp\n");
asm volatile ("0: call *%eax\n"); /* call func(...), preserves ebx */
asm volatile ("mov 4*12(%ebx), %eax\n");
asm volatile ("push %eax\n");
asm volatile ("call setcontext\n");
__asm__ volatile ("mov 4*3(%ebx), %eax\n"); /* eax = func */
__asm__ volatile ("jecxz 0f\n");
__asm__ volatile ("cmpb $1, %cl; je 1f\n");
__asm__ volatile ("cmpb $2, %cl; je 2f\n");
__asm__ volatile ("cmpb $3, %cl; je 3f\n");
__asm__ volatile ("cmpb $4, %cl; je 4f\n");
__asm__ volatile (" mov 4*7(%ebx), %edx; push %edx\n");
__asm__ volatile ("4: mov 4*5(%ebx), %edx; push %edx\n");
__asm__ volatile ("3: push %edi\n");
__asm__ volatile ("2: push %esi\n");
__asm__ volatile ("1: push %ebp\n");
__asm__ volatile ("0: call *%eax\n"); /* call func(...), preserves ebx */
__asm__ volatile ("mov 4*12(%ebx), %eax\n");
__asm__ volatile ("push %eax\n");
__asm__ volatile ("call setcontext\n");
__builtin_unreachable();
}

2
tests/fault_handler/main.c

@ -27,7 +27,7 @@
#endif /* !defined(FORBIDDEN_ADDRESS) */
#ifndef INVALID_INSTRUCTION
/* Random garbage may crash the program as well. */
#define INVALID_INSTRUCTION asm volatile (".short 0xdead, 0xbeef, 0xcafe, 0xbabe\n")
#define INVALID_INSTRUCTION __asm__ volatile (".short 0xdead, 0xbeef, 0xcafe, 0xbabe\n")
#endif /* !defined(INVALID_INSTRUCTION) */
#define PRINT_MACRO(a) PRINT_MACRO2(a)

2
tests/leds/main.c

@ -33,7 +33,7 @@
void dumb_delay(uint32_t delay)
{
for (uint32_t i = 0; i < delay; i++) {
asm("nop");
__asm__("nop");
}
}

Loading…
Cancel
Save