// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"
#include "funcdata.h"

// func Cas(ptr *int32, old, new int32) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return true
//	} else {
//		return false
//	}
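//
// The expected old value must be loaded into AX for CMPXCHGL; the
// instruction sets ZF on success, and SETEQ stores that flag into the
// bool result.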
TEXT ·Cas(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)
	SETEQ	ret+12(FP)
	RET

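// The typed variants below tail-jump to the untyped implementations: on
// 386 every 4-byte integer type (int32, uint32, int, uint, uintptr) has
// the same layout, and x86's total store order means the release-ordered
// CasRel needs nothing beyond plain Cas.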
TEXT ·Casint32(SB), NOSPLIT, $0-13
	JMP	·Cas(SB)

TEXT ·Casint64(SB), NOSPLIT, $0-21
	JMP	·Cas64(SB)

TEXT ·Casuintptr(SB), NOSPLIT, $0-13
	JMP	·Cas(SB)

TEXT ·CasRel(SB), NOSPLIT, $0-13
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB), NOSPLIT, $0-8
	JMP	·Load(SB)

TEXT ·Loaduint(SB), NOSPLIT, $0-8
	JMP	·Load(SB)

TEXT ·Storeint32(SB), NOSPLIT, $0-8
	JMP	·Store(SB)

TEXT ·Storeint64(SB), NOSPLIT, $0-12
	JMP	·Store64(SB)

TEXT ·Storeuintptr(SB), NOSPLIT, $0-8
	JMP	·Store(SB)

TEXT ·Xadduintptr(SB), NOSPLIT, $0-12
	JMP	·Xadd(SB)

TEXT ·Loadint32(SB), NOSPLIT, $0-8
	JMP	·Load(SB)

TEXT ·Loadint64(SB), NOSPLIT, $0-12
	JMP	·Load64(SB)

TEXT ·Xaddint32(SB), NOSPLIT, $0-12
	JMP	·Xadd(SB)

TEXT ·Xaddint64(SB), NOSPLIT, $0-20
	JMP	·Xadd64(SB)

// func Cas64(ptr *uint64, old, new uint64) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return true
//	} else {
//		return false
//	}
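//
// LOCK CMPXCHG8B compares DX:AX against the 8-byte memory operand and, on
// a match, stores CX:BX into it. The TESTL $7 check panics on pointers
// that are not 8-byte aligned; the package requires that alignment for
// 64-bit atomics on 32-bit platforms.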
TEXT ·Cas64(SB), NOSPLIT, $0-21
	NO_LOCAL_POINTERS
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	MOVL	old_lo+4(FP), AX
	MOVL	old_hi+8(FP), DX
	MOVL	new_lo+12(FP), BX
	MOVL	new_hi+16(FP), CX
	LOCK
	CMPXCHG8B	0(BP)
	SETEQ	ret+20(FP)
	RET

// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return true
//	} else {
//		return false
//	}
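//
// Casp1 is Cas specialized to unsafe.Pointer values. Note that it performs
// no GC write barrier itself; the Go-level wrappers are responsible for
// that.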
TEXT ·Casp1(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)
	SETEQ	ret+12(FP)
	RET

// func Xadd(ptr *uint32, delta int32) uint32
// Atomically:
//	*ptr += delta
//	return *ptr
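//
// XADDL exchanges its operands before adding, so AX ends up holding the
// value the memory word had before the add; adding back the delta saved
// in CX reconstructs the new value for the return.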
TEXT ·Xadd(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	delta+4(FP), AX
	MOVL	AX, CX
	LOCK
	XADDL	AX, 0(BX)
	ADDL	CX, AX
	MOVL	AX, ret+8(FP)
	RET

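// func Xadd64(ptr *uint64, delta int64) uint64
// Atomically:
//	*ptr += delta
//	return *ptr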
TEXT ·Xadd64(SB), NOSPLIT, $0-20
	NO_LOCAL_POINTERS
	// no XADDQ so use CMPXCHG8B loop
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	// DI:SI = delta
	MOVL	delta_lo+4(FP), SI
	MOVL	delta_hi+8(FP), DI
	// DX:AX = *ptr
	MOVL	0(BP), AX
	MOVL	4(BP), DX
addloop:
	// CX:BX = DX:AX (*ptr) + DI:SI (delta)
	MOVL	AX, BX
	MOVL	DX, CX
	ADDL	SI, BX
	ADCL	DI, CX

	// if *ptr == DX:AX {
	//	*ptr = CX:BX
	// } else {
	//	DX:AX = *ptr
	// }
	// all in one instruction
	LOCK
	CMPXCHG8B	0(BP)

	JNZ	addloop

	// success
	// return CX:BX
	MOVL	BX, ret_lo+12(FP)
	MOVL	CX, ret_hi+16(FP)
	RET

// func Xchg8(ptr *uint8, new uint8) uint8
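// Atomically:
//	old := *ptr
//	*ptr = new
//	return old
//
// XCHG with a memory operand asserts LOCK implicitly, so no prefix is
// needed.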
TEXT ·Xchg8(SB), NOSPLIT, $0-9
	MOVL	ptr+0(FP), BX
	MOVB	new+4(FP), AX
	XCHGB	AX, 0(BX)
	MOVB	AX, ret+8(FP)
	RET

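// func Xchg(ptr *uint32, new uint32) uint32
// Atomically:
//	old := *ptr
//	*ptr = new
//	return old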
TEXT ·Xchg(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	new+4(FP), AX
	XCHGL	AX, 0(BX)
	MOVL	AX, ret+8(FP)
	RET

TEXT ·Xchgint32(SB), NOSPLIT, $0-12
	JMP	·Xchg(SB)

TEXT ·Xchgint64(SB), NOSPLIT, $0-20
	JMP	·Xchg64(SB)

TEXT ·Xchguintptr(SB), NOSPLIT, $0-12
	JMP	·Xchg(SB)

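// func Xchg64(ptr *uint64, new uint64) uint64
// Atomically:
//	old := *ptr
//	*ptr = new
//	return old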
TEXT ·Xchg64(SB), NOSPLIT, $0-20
	NO_LOCAL_POINTERS
	// no XCHGQ so use CMPXCHG8B loop
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	// CX:BX = new
	MOVL	new_lo+4(FP), BX
	MOVL	new_hi+8(FP), CX
	// DX:AX = *ptr
	MOVL	0(BP), AX
	MOVL	4(BP), DX
swaploop:
	// if *ptr == DX:AX
	//	*ptr = CX:BX
	// else
	//	DX:AX = *ptr
	// all in one instruction
	LOCK
	CMPXCHG8B	0(BP)
	JNZ	swaploop

	// success
	// return DX:AX
	MOVL	AX, ret_lo+12(FP)
	MOVL	DX, ret_hi+16(FP)
	RET

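// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
// Stores val at *ptr without a GC write barrier. XCHGL is used rather
// than a plain MOVL because XCHG's implicit LOCK makes the store
// sequentially consistent.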
TEXT ·StorepNoWB(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)
	RET

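// func Store(ptr *uint32, val uint32)
// As in StorepNoWB, the implicit LOCK of XCHGL provides the required
// memory barrier.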
TEXT ·Store(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)
	RET

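// StoreRel and StoreReluintptr need only release ordering, which every
// x86 store already provides, so they alias Store.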
TEXT ·StoreRel(SB), NOSPLIT, $0-8
	JMP	·Store(SB)

TEXT ·StoreReluintptr(SB), NOSPLIT, $0-8
	JMP	·Store(SB)

// func Load64(ptr *uint64) uint64
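//
// An aligned 8-byte access is guaranteed atomic since the Pentium, so a
// single MMX MOVQ through M0 loads the value atomically without a LOCKed
// instruction.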
TEXT ·Load64(SB), NOSPLIT, $0-12
	NO_LOCAL_POINTERS
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	MOVQ	(AX), M0
	MOVQ	M0, ret+4(FP)
	EMMS
	RET

// func Store64(ptr *uint64, val uint64)
TEXT ·Store64(SB), NOSPLIT, $0-12
	NO_LOCAL_POINTERS
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	// MOVQ and EMMS were introduced on the Pentium MMX.
	MOVQ	val+4(FP), M0
	MOVQ	M0, (AX)
	EMMS
	// This is essentially a no-op, but it provides required memory fencing.
	// It can be replaced with MFENCE, but MFENCE was introduced only on the
	// Pentium 4 (SSE2).
	XORL	AX, AX
	LOCK
	XADDL	AX, (SP)
	RET

// func Or8(ptr *uint8, val uint8)
TEXT ·Or8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK
	ORB	BX, (AX)
	RET

// func And8(ptr *uint8, val uint8)
TEXT ·And8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK
	ANDB	BX, (AX)
	RET

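// func Store8(ptr *uint8, val uint8)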
TEXT ·Store8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), BX
	MOVB	val+4(FP), AX
	XCHGB	AX, 0(BX)
	RET

// func Or(ptr *uint32, val uint32)
TEXT ·Or(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), AX
	MOVL	val+4(FP), BX
	LOCK
	ORL	BX, (AX)
	RET

// func And(ptr *uint32, val uint32)
TEXT ·And(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), AX
	MOVL	val+4(FP), BX
	LOCK
	ANDL	BX, (AX)
	RET

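// LOCK ANDL and LOCK ORL do not return the value they replace, so the
// variants below that must return the old value are built as CMPXCHGL
// (or CMPXCHG8B) retry loops. A sketch of the pattern in Go terms (And32
// shown; the others only change the operation and width):
//
//	for {
//		old := *ptr
//		if Cas(ptr, old, old&val) {
//			return old
//		}
//	}
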
// func And32(ptr *uint32, val uint32) (old uint32)
TEXT ·And32(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), CX
casloop:
	MOVL	CX, DX
	MOVL	(BX), AX
	ANDL	AX, DX
	LOCK
	CMPXCHGL	DX, (BX)
	JNZ	casloop
	MOVL	AX, ret+8(FP)
	RET

// func Or32(ptr *uint32, val uint32) (old uint32)
TEXT ·Or32(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), CX
casloop:
	MOVL	CX, DX
	MOVL	(BX), AX
	ORL	AX, DX
	LOCK
	CMPXCHGL	DX, (BX)
	JNZ	casloop
	MOVL	AX, ret+8(FP)
	RET

// func And64(ptr *uint64, val uint64) (old uint64)
TEXT ·And64(SB), NOSPLIT, $0-20
	MOVL	ptr+0(FP), BP
	// DI:SI = val
	MOVL	val_lo+4(FP), SI
	MOVL	val_hi+8(FP), DI
	// DX:AX = *ptr
	MOVL	0(BP), AX
	MOVL	4(BP), DX
casloop:
	// CX:BX = DX:AX (*ptr) & DI:SI (mask)
	MOVL	AX, BX
	MOVL	DX, CX
	ANDL	SI, BX
	ANDL	DI, CX
	LOCK
	CMPXCHG8B	0(BP)
	JNZ	casloop
	MOVL	AX, ret_lo+12(FP)
	MOVL	DX, ret_hi+16(FP)
	RET

// func Or64(ptr *uint64, val uint64) (old uint64)
TEXT ·Or64(SB), NOSPLIT, $0-20
	MOVL	ptr+0(FP), BP
	// DI:SI = val
	MOVL	val_lo+4(FP), SI
	MOVL	val_hi+8(FP), DI
	// DX:AX = *ptr
	MOVL	0(BP), AX
	MOVL	4(BP), DX
casloop:
	// CX:BX = DX:AX (*ptr) | DI:SI (mask)
	MOVL	AX, BX
	MOVL	DX, CX
	ORL	SI, BX
	ORL	DI, CX
	LOCK
	CMPXCHG8B	0(BP)
	JNZ	casloop
	MOVL	AX, ret_lo+12(FP)
	MOVL	DX, ret_hi+16(FP)
	RET

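// uintptr and uint are 4 bytes on 386, so the uintptr variants alias the
// 32-bit implementations.
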
// func Anduintptr(ptr *uintptr, val uintptr) (old uintptr)
TEXT ·Anduintptr(SB), NOSPLIT, $0-12
	JMP	·And32(SB)

// func Oruintptr(ptr *uintptr, val uintptr) (old uintptr)
TEXT ·Oruintptr(SB), NOSPLIT, $0-12
	JMP	·Or32(SB)