// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// amd64 implementations of the runtime∕internal∕atomic primitives.
//
// Note: some of these functions are semantically inlined
// by the compiler (in src/cmd/compile/internal/gc/ssa.go).

#include "textflag.h"

// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0-17
	MOVQ	ptr+0(FP), BX
	MOVL	old+8(FP), AX
	MOVL	new+12(FP), CX
	// CMPXCHG compares AX with 0(BX); on match it stores CX and sets ZF.
	LOCK
	CMPXCHGL	CX, 0(BX)
	// ZF holds the success/failure result of the exchange.
	SETEQ	ret+16(FP)
	RET

// bool runtime∕internal∕atomic·Cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
	MOVQ	ptr+0(FP), BX
	MOVQ	old+8(FP), AX
	MOVQ	new+16(FP), CX
	LOCK
	CMPXCHGQ	CX, 0(BX)
	SETEQ	ret+24(FP)
	RET

// The following forwarders tail-jump (JMP) to the 64-bit variants,
// reusing the caller's argument frame. This is valid on amd64 because
// uintptr, uint, and int64 all have the same 8-byte layout as uint64,
// so the frame sizes and argument offsets match exactly.

TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25
	JMP	runtime∕internal∕atomic·Cas64(SB)

TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-16
	JMP	runtime∕internal∕atomic·Load64(SB)

TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-16
	JMP	runtime∕internal∕atomic·Load64(SB)

TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-16
	JMP	runtime∕internal∕atomic·Store64(SB)

TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
	JMP	runtime∕internal∕atomic·Load64(SB)

TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-24
	JMP	runtime∕internal∕atomic·Xadd64(SB)

// bool Casp1(void **val, void *old, void *new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
// Same instruction sequence as Cas64; pointers are 8 bytes on amd64.
TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-25
	MOVQ	ptr+0(FP), BX
	MOVQ	old+8(FP), AX
	MOVQ	new+16(FP), CX
	LOCK
	CMPXCHGQ	CX, 0(BX)
	SETEQ	ret+24(FP)
	RET

// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-20
	MOVQ	ptr+0(FP), BX
	MOVL	delta+8(FP), AX
	MOVL	AX, CX
	LOCK
	XADDL	AX, 0(BX)
	// XADD leaves the OLD value of *val in AX; add the saved delta (CX)
	// to produce the new value that Xadd returns.
	ADDL	CX, AX
	MOVL	AX, ret+16(FP)
	RET

// uint64 Xadd64(uint64 volatile *val, int64 delta)
// 64-bit counterpart of Xadd; same old-value-plus-delta trick.
TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
	MOVQ	ptr+0(FP), BX
	MOVQ	delta+8(FP), AX
	MOVQ	AX, CX
	LOCK
	XADDQ	AX, 0(BX)
	ADDQ	CX, AX
	MOVQ	AX, ret+16(FP)
	RET

TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-24
	JMP	runtime∕internal∕atomic·Xadd64(SB)

// uint32 Xchg(uint32 volatile *val, uint32 new)
// Atomically swaps *val with new and returns the old value.
// XCHG with a memory operand has an implicit LOCK prefix, so no
// explicit LOCK is needed here.
TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-20
	MOVQ	ptr+0(FP), BX
	MOVL	new+8(FP), AX
	XCHGL	AX, 0(BX)
	MOVL	AX, ret+16(FP)
	RET

// uint64 Xchg64(uint64 volatile *val, uint64 new)
// 64-bit counterpart of Xchg.
TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
	MOVQ	ptr+0(FP), BX
	MOVQ	new+8(FP), AX
	XCHGQ	AX, 0(BX)
	MOVQ	AX, ret+16(FP)
	RET

TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-24
	JMP	runtime∕internal∕atomic·Xchg64(SB)

// StorepNoWB(void **ptr, void *val): atomic pointer store with no
// write barrier (NoWB) — callers must ensure GC safety themselves.
// Implemented as XCHG (implicitly LOCKed) rather than a plain MOV so
// the store is a sequentially consistent full barrier.
TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-16
	MOVQ	ptr+0(FP), BX
	MOVQ	val+8(FP), AX
	XCHGQ	AX, 0(BX)
	RET

// Store(uint32 *ptr, uint32 val): atomic 32-bit store, full barrier
// via implicitly-LOCKed XCHG; the swapped-out old value is discarded.
TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
	MOVQ	ptr+0(FP), BX
	MOVL	val+8(FP), AX
	XCHGL	AX, 0(BX)
	RET

// Store64(uint64 *ptr, uint64 val): 64-bit counterpart of Store.
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
	MOVQ	ptr+0(FP), BX
	MOVQ	val+8(FP), AX
	XCHGQ	AX, 0(BX)
	RET

// void runtime∕internal∕atomic·Or8(byte volatile*, byte);
// Atomically: *ptr |= val.
TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-9
	MOVQ	ptr+0(FP), AX
	MOVB	val+8(FP), BX
	LOCK
	ORB	BX, (AX)
	RET

// void runtime∕internal∕atomic·And8(byte volatile*, byte);
// Atomically: *ptr &= val.
TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-9
	MOVQ	ptr+0(FP), AX
	MOVB	val+8(FP), BX
	LOCK
	ANDB	BX, (AX)
	RET