Login | Register For Free | Help
Search for: (Advanced)

Mailing List Archive: Xen: Changelog

[xen-unstable] x86: fix wait code asm() constraints

 

 

Xen changelog RSS feed   Index | Next | Previous | View Threaded


patchbot at xen

Aug 7, 2012, 1:33 PM

Post #1 of 1 (48 views)
Permalink
[xen-unstable] x86: fix wait code asm() constraints

# HG changeset patch
# User Jan Beulich <jbeulich [at] suse>
# Date 1344244723 -7200
# Node ID 6221bee07e5dc31d9ed9eb3c0c1c94a80c56c78d
# Parent 6ccad16b50b6b402a98ddfeb9ac6fa7522119778
x86: fix wait code asm() constraints

This fixes theoretical issues with those constraints - operands that
get clobbered before consuming all input operands must be marked so
according to the GCC documentation. Beyond that, the change is merely
code improvement, not a bug fix.

In __prepare_to_wait(), properly mark early clobbered registers. By
doing so, we at once eliminate the need to save/restore rCX and rDI.

In check_wakeup_from_wait(), make the current constraints match by
removing the code that actually alters registers. By adjusting the
resume address in __prepare_to_wait(), we can simply re-use the copying
operation there (rather than doing a second pointless copy in the
opposite direction after branching to the resume point), which at once
eliminates the need for re-loading rCX and rDI inside the asm().

Signed-off-by: Jan Beulich <jbeulich [at] suse>
Acked-by: Keir Fraser <keir [at] xen>
---


diff -r 6ccad16b50b6 -r 6221bee07e5d xen/common/wait.c
--- a/xen/common/wait.c Fri Aug 03 12:25:29 2012 +0100
+++ b/xen/common/wait.c Mon Aug 06 11:18:43 2012 +0200
@@ -126,6 +126,7 @@ static void __prepare_to_wait(struct wai
{
char *cpu_info = (char *)get_cpu_info();
struct vcpu *curr = current;
+ unsigned long dummy;

ASSERT(wqv->esp == 0);

@@ -140,27 +141,27 @@ static void __prepare_to_wait(struct wai

asm volatile (
#ifdef CONFIG_X86_64
- "push %%rax; push %%rbx; push %%rcx; push %%rdx; push %%rdi; "
+ "push %%rax; push %%rbx; push %%rdx; "
"push %%rbp; push %%r8; push %%r9; push %%r10; push %%r11; "
"push %%r12; push %%r13; push %%r14; push %%r15; call 1f; "
- "1: mov 80(%%rsp),%%rdi; mov 96(%%rsp),%%rcx; mov %%rsp,%%rsi; "
+ "1: mov %%rsp,%%rsi; addq $2f-1b,(%%rsp); "
"sub %%rsi,%%rcx; cmp %3,%%rcx; jbe 2f; "
"xor %%esi,%%esi; jmp 3f; "
"2: rep movsb; mov %%rsp,%%rsi; 3: pop %%rax; "
"pop %%r15; pop %%r14; pop %%r13; pop %%r12; "
"pop %%r11; pop %%r10; pop %%r9; pop %%r8; "
- "pop %%rbp; pop %%rdi; pop %%rdx; pop %%rcx; pop %%rbx; pop %%rax"
+ "pop %%rbp; pop %%rdx; pop %%rbx; pop %%rax"
#else
- "push %%eax; push %%ebx; push %%ecx; push %%edx; push %%edi; "
+ "push %%eax; push %%ebx; push %%edx; "
"push %%ebp; call 1f; "
- "1: mov 8(%%esp),%%edi; mov 16(%%esp),%%ecx; mov %%esp,%%esi; "
+ "1: mov %%esp,%%esi; addl $2f-1b,(%%esp); "
"sub %%esi,%%ecx; cmp %3,%%ecx; jbe 2f; "
"xor %%esi,%%esi; jmp 3f; "
"2: rep movsb; mov %%esp,%%esi; 3: pop %%eax; "
- "pop %%ebp; pop %%edi; pop %%edx; pop %%ecx; pop %%ebx; pop %%eax"
+ "pop %%ebp; pop %%edx; pop %%ebx; pop %%eax"
#endif
- : "=S" (wqv->esp)
- : "c" (cpu_info), "D" (wqv->stack), "i" (PAGE_SIZE)
+ : "=&S" (wqv->esp), "=&c" (dummy), "=&D" (dummy)
+ : "i" (PAGE_SIZE), "1" (cpu_info), "2" (wqv->stack)
: "memory" );

if ( unlikely(wqv->esp == 0) )
@@ -200,7 +201,7 @@ void check_wakeup_from_wait(void)
}

asm volatile (
- "mov %1,%%"__OP"sp; rep movsb; jmp *(%%"__OP"sp)"
+ "mov %1,%%"__OP"sp; jmp *(%0)"
: : "S" (wqv->stack), "D" (wqv->esp),
"c" ((char *)get_cpu_info() - (char *)wqv->esp)
: "memory" );

_______________________________________________
Xen-changelog mailing list
Xen-changelog [at] lists
http://lists.xensource.com/xen-changelog

Xen changelog RSS feed   Index | Next | Previous | View Threaded
 
 


Interested in having your list archived? Contact Gossamer Threads
 
  Web Applications & Managed Hosting Powered by Gossamer Threads Inc.