github.com/llvm-mirror/llgo@v0.0.0-20190322182713-bf6f0a60fce1/third_party/gofrontend/libgo/runtime/lfstack.goc

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lock-free stack.

package runtime
#include "runtime.h"
#include "arch.h"

#if __SIZEOF_POINTER__ == 8
// SPARC64 and Solaris on AMD64 use all 64 bits of virtual addresses.
// Use the low-order three bits as the ABA counter.
// http://docs.oracle.com/cd/E19120-01/open.solaris/816-5138/6mba6ua5p/index.html
# if defined(__sparc__) || (defined(__sun__) && defined(__amd64__))
static inline uint64 lfPack(LFNode *node, uintptr cnt) {
	return ((uint64)(node)) | ((cnt)&7);
}
static inline LFNode* lfUnpack(uint64 val) {
	return (LFNode*)(val&~7);
}
# else
# if defined(__aarch64__)
// Depending on the kernel options, pointers on arm64 can have up to 48 significant
// bits (see https://www.kernel.org/doc/Documentation/arm64/memory.txt).
# define PTR_BITS 48
# else
// AMD64 uses 48-bit virtual addresses; bit 47 is used as the kernel/user flag.
// So we use the 17 most significant bits (plus the three alignment bits,
// see CNT_BITS) as the ABA counter.
# define PTR_BITS 47
# endif
# define CNT_BITS (64 - PTR_BITS + 3)
static inline uint64 lfPack(LFNode *node, uintptr cnt) {
	return ((uint64)(node)<<(64-PTR_BITS)) | (cnt&(((1<<CNT_BITS)-1)));
}
static inline LFNode* lfUnpack(uint64 val) {
	return (LFNode*)((val >> CNT_BITS) << 3);
}
# endif
#else
static inline uint64 lfPack(LFNode *node, uintptr cnt) {
	return ((uint64)(uintptr)(node)<<32) | cnt;
}
static inline LFNode* lfUnpack(uint64 val) {
	return (LFNode*)(uintptr)(val >> 32);
}
#endif

void
runtime_lfstackpush(uint64 *head, LFNode *node)
{
	uint64 old, new;

	// The node pointer must survive a pack/unpack round trip,
	// otherwise the packed head word could not identify it.
	if(node != lfUnpack(lfPack(node, 0))) {
		runtime_printf("p=%p\n", node);
		runtime_throw("runtime_lfstackpush: invalid pointer");
	}

	node->pushcnt++;
	new = lfPack(node, node->pushcnt);
	for(;;) {
		old = runtime_atomicload64(head);
		node->next = lfUnpack(old);
		if(runtime_cas64(head, old, new))
			break;
	}
}

LFNode*
runtime_lfstackpop(uint64 *head)
{
	LFNode *node, *node2;
	uint64 old, new;

	for(;;) {
		old = runtime_atomicload64(head);
		if(old == 0)
			return nil;
		node = lfUnpack(old);
		node2 = runtime_atomicloadp(&node->next);
		new = 0;
		if(node2 != nil)
			new = lfPack(node2, node2->pushcnt);
		// The push counter packed into the head word makes the CAS fail
		// if the node was popped and re-pushed concurrently (ABA).
		if(runtime_cas64(head, old, new))
			return node;
	}
}

func lfstackpush_go(head *uint64, node *LFNode) {
	runtime_lfstackpush(head, node);
}

func lfstackpop_go(head *uint64) (node *LFNode) {
	node = runtime_lfstackpop(head);
}
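
For reference, the following standalone sketch (not part of this file) shows how the amd64 branch of lfPack/lfUnpack above packs a 47-bit, 8-byte-aligned pointer together with a 20-bit ABA counter into a single 64-bit word and then recovers the pointer. The pack/unpack helpers mirror the runtime code; the demo node, the malloc-based allocation, and main() are illustrative assumptions, and the round trip only holds on machines whose user addresses fit in 47 bits.

/*
 * Standalone sketch (not part of libgo): the amd64-style packing above,
 * with PTR_BITS = 47 and CNT_BITS = 20.  Node type, allocation and main()
 * are hypothetical; this only demonstrates the pointer/counter round trip.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PTR_BITS 47
#define CNT_BITS (64 - PTR_BITS + 3)	/* 3 extra bits: nodes are 8-byte aligned */

static uint64_t pack(void *node, uintptr_t cnt) {
	/* Pointer in the high bits, CNT_BITS of the push counter in the low bits. */
	return ((uint64_t)(uintptr_t)node << (64 - PTR_BITS)) |
	       (cnt & ((1ull << CNT_BITS) - 1));
}

static void *unpack(uint64_t val) {
	/* Discard the counter; shifting left by 3 restores the 8-byte alignment. */
	return (void *)(uintptr_t)((val >> CNT_BITS) << 3);
}

int main(void) {
	void *node = malloc(64);	/* stands in for an LFNode; malloc is at least 8-byte aligned */
	uint64_t word = pack(node, 5);

	/* Same sanity check runtime_lfstackpush performs before publishing a node. */
	assert(unpack(word) == node);
	printf("node=%p packed=%" PRIx64 " recovered=%p\n", node, word, unpack(node ? word : 0));
	free(node);
	return 0;
}

Because the push counter shares the head word with the pointer, a successful runtime_cas64 proves not only that the head still points at the same node but also that it is the same push of that node, which is what defends the pop loop against the ABA problem.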