src/internal/runtime/atomic/atomic_mips64x.s

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build mips64 || mips64le

#include "textflag.h"

#define SYNC	WORD $0xf
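
// The assembler has no SYNC mnemonic for this port, so the memory barrier
// is emitted as a raw instruction word; 0x0000000f is the MIPS SYNC opcode.
// The read-modify-write routines below all use the LL/SC (load-linked /
// store-conditional) idiom: SC writes back only if the word has not been
// modified since the LL, leaving 1 in its source register on success and
// 0 on failure, in which case the loop retries.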

// func Cas(ptr *uint32, old, new uint32) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return true
//	} else {
//		return false
//	}
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVV	ptr+0(FP), R1
	MOVW	old+8(FP), R2
	MOVW	new+12(FP), R5
	SYNC
cas_again:
	MOVV	R5, R3
	LL	(R1), R4
	BNE	R2, R4, cas_fail
	SC	R3, (R1)	// R3 = 1 on success, 0 on failure
	BEQ	R3, cas_again
	MOVV	$1, R1
	MOVB	R1, ret+16(FP)
	SYNC
	RET
cas_fail:
	MOVV	$0, R1
	JMP	-4(PC)	// store the result via the MOVB/SYNC/RET epilogue above

// func Cas64(ptr *uint64, old, new uint64) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return true
//	} else {
//		return false
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVV	ptr+0(FP), R1
	MOVV	old+8(FP), R2
	MOVV	new+16(FP), R5
	SYNC
cas64_again:
	MOVV	R5, R3
	LLV	(R1), R4
	BNE	R2, R4, cas64_fail
	SCV	R3, (R1)	// R3 = 1 on success, 0 on failure
	BEQ	R3, cas64_again
	MOVV	$1, R1
	MOVB	R1, ret+24(FP)
	SYNC
	RET
cas64_fail:
	MOVV	$0, R1
	JMP	-4(PC)	// store the result via the MOVB/SYNC/RET epilogue above

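// The stubs below alias the typed entry points onto the 32- and 64-bit
// primitives: int, uint, and uintptr are all 64 bits wide on mips64, and
// CasRel's release ordering is already provided by the fully fenced Cas.
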
TEXT ·Casint32(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)

TEXT ·Casint64(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

TEXT ·CasRel(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)

TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)

TEXT ·Storeint32(SB), NOSPLIT, $0-12
	JMP	·Store(SB)

TEXT ·Storeint64(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

TEXT ·Loadint32(SB), NOSPLIT, $0-12
	JMP	·Load(SB)

TEXT ·Loadint64(SB), NOSPLIT, $0-16
	JMP	·Load64(SB)

TEXT ·Xaddint32(SB), NOSPLIT, $0-20
	JMP	·Xadd(SB)

TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

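// Casp1 is the pointer CAS without a write barrier; callers are expected
// to arrange any required barrier themselves. A pointer is 8 bytes on
// mips64, so it shares the Cas64 implementation.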
// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return true
//	} else {
//		return false
//	}
TEXT ·Casp1(SB), NOSPLIT, $0-25
	JMP ·Cas64(SB)

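// Xadd and Xadd64 return the updated value. The sum is copied aside
// before the store-conditional, because a successful SC/SCV overwrites
// its source register with the status bit.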
// uint32 Xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R2
	MOVW	delta+8(FP), R3
	SYNC
	LL	(R2), R1
	ADDU	R1, R3, R4
	MOVV	R4, R1	// save the result before SC clobbers R4
	SC	R4, (R2)
	BEQ	R4, -4(PC)	// SC failed; retry from the LL
	MOVW	R1, ret+16(FP)
	SYNC
	RET

// uint64 Xadd64(uint64 volatile *ptr, int64 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R2
	MOVV	delta+8(FP), R3
	SYNC
	LLV	(R2), R1
	ADDVU	R1, R3, R4
	MOVV	R4, R1	// save the result before SCV clobbers R4
	SCV	R4, (R2)
	BEQ	R4, -4(PC)	// SCV failed; retry from the LLV
	MOVV	R1, ret+16(FP)
	SYNC
	RET

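// Xchg8 emulates a byte exchange with a 32-bit LL/SC on the containing
// aligned word. Worked example (little endian, ptr % 4 == 1):
//	R4 = 1*8 = 8              // bit offset of the byte within the word
//	R7 = ^(0xFF << 8)         // mask that clears only that byte
//	new byte is shifted left by 8 and OR'ed into the masked word
// On big endian, ptr ^ 3 flips the byte index so the same shifts apply.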
// uint8 Xchg8(ptr *uint8, new uint8)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg8(SB), NOSPLIT, $0-17
	MOVV	ptr+0(FP), R2
	MOVBU	new+8(FP), R5
#ifdef GOARCH_mips64
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R2
#endif
	// R4 = ((ptr & 3) * 8)
	AND	$3, R2, R4
	SLLV	$3, R4
	// Shift val for aligned ptr. R7 = (0xFF << R4) ^ (-1)
	MOVV	$0xFF, R7
	SLLV	R4, R7
	XOR	$-1, R7
	AND	$~3, R2	// align ptr down to the containing 32-bit word
	SLLV	R4, R5

	SYNC
	LL	(R2), R9
	AND	R7, R9, R8	// clear the target byte
	OR	R5, R8		// insert the new byte
	SC	R8, (R2)
	BEQ	R8, -5(PC)	// SC failed; retry
	SYNC
	SRLV	R4, R9
	MOVBU	R9, ret+16(FP)
	RET

// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R2
	MOVW	new+8(FP), R5

	SYNC
	MOVV	R5, R3
	LL	(R2), R1
	SC	R3, (R2)
	BEQ	R3, -3(PC)	// SC failed; reload R3 from R5 and retry
	MOVW	R1, ret+16(FP)
	SYNC
	RET

// uint64 Xchg64(ptr *uint64, new uint64)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R2
	MOVV	new+8(FP), R5

	SYNC
	MOVV	R5, R3
	LLV	(R2), R1
	SCV	R3, (R2)
	BEQ	R3, -3(PC)	// SCV failed; reload R3 from R5 and retry
	MOVV	R1, ret+16(FP)
	SYNC
	RET

TEXT ·Xchgint32(SB), NOSPLIT, $0-20
	JMP	·Xchg(SB)

TEXT ·Xchgint64(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)

TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)

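// StorepNoWB stores a pointer without performing a write barrier. The
// StoreRel* variants need only release ordering, which the fully fenced
// stores below already provide, so they simply alias Store/Store64.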
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·StoreRel(SB), NOSPLIT, $0-12
	JMP	·Store(SB)

TEXT ·StoreRel64(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

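// The plain stores bracket the write with SYNC on both sides: the leading
// barrier orders it after earlier memory operations, the trailing one
// publishes it before later ones.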
TEXT ·Store(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R1
	MOVW	val+8(FP), R2
	SYNC
	MOVW	R2, 0(R1)
	SYNC
	RET

TEXT ·Store8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1
	MOVB	val+8(FP), R2
	SYNC
	MOVB	R2, 0(R1)
	SYNC
	RET

TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVV	ptr+0(FP), R1
	MOVV	val+8(FP), R2
	SYNC
	MOVV	R2, 0(R1)
	SYNC
	RET

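// Or8/And8 emulate a byte-wide RMW with a 32-bit LL/SC on the aligned
// word, like Xchg8 above. For And8 the operand is widened to a full-word
// mask so the other three bytes are preserved. Worked example (little
// endian, ptr % 4 == 2, val = 0x0F):
//	R2 = (0x0F << 16) | ^(0xFF << 16) = 0xFF0FFFFF (low 32 bits)
// so the AND changes only byte 2.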
// void Or8(byte volatile*, byte);
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1
	MOVBU	val+8(FP), R2
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R3
	AND	R1, R3
	// Compute val shift.
#ifdef GOARCH_mips64
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	// R4 = ((ptr & 3) * 8)
	AND	$3, R1, R4
	SLLV	$3, R4
	// Shift val for aligned ptr. R2 = val << R4
	SLLV	R4, R2

	SYNC
	LL	(R3), R4
	OR	R2, R4
	SC	R4, (R3)
	BEQ	R4, -4(PC)	// SC failed; retry
	SYNC
	RET

// void And8(byte volatile*, byte);
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1
	MOVBU	val+8(FP), R2
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R3
	AND	R1, R3
	// Compute val shift.
#ifdef GOARCH_mips64
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	// R4 = ((ptr & 3) * 8)
	AND	$3, R1, R4
	SLLV	$3, R4
	// Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4)
	MOVV	$0xFF, R5
	SLLV	R4, R2
	SLLV	R4, R5
	NOR	R0, R5	// R5 = ^R5 (NOR with the zero register is bitwise NOT)
	OR	R5, R2

	SYNC
	LL	(R3), R4
	AND	R2, R4
	SC	R4, (R3)
	BEQ	R4, -4(PC)	// SC failed; retry
	SYNC
	RET

// func Or(ptr *uint32, val uint32)
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R1
	MOVW	val+8(FP), R2

	SYNC
	LL	(R1), R3
	OR	R2, R3
	SC	R3, (R1)
	BEQ	R3, -4(PC)	// SC failed; retry
	SYNC
	RET

// func And(ptr *uint32, val uint32)
TEXT ·And(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R1
	MOVW	val+8(FP), R2

	SYNC
	LL	(R1), R3
	AND	R2, R3
	SC	R3, (R1)
	BEQ	R3, -4(PC)	// SC failed; retry
	SYNC
	RET

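// Unlike Or/And above, the Or32/And32/Or64/And64 variants return the old
// value: the loaded word is kept in R3 while the updated word is built in
// R4 for the SC, so a successful store leaves the pre-update value intact.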
// func Or32(ptr *uint32, val uint32) old uint32
TEXT ·Or32(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R1
	MOVW	val+8(FP), R2

	SYNC
	LL	(R1), R3
	OR	R2, R3, R4
	SC	R4, (R1)
	BEQ	R4, -3(PC)	// SC failed; retry from the LL
	SYNC
	MOVW	R3, ret+16(FP)
	RET

// func And32(ptr *uint32, val uint32) old uint32
TEXT ·And32(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R1
	MOVW	val+8(FP), R2

	SYNC
	LL	(R1), R3
	AND	R2, R3, R4
	SC	R4, (R1)
	BEQ	R4, -3(PC)	// SC failed; retry from the LL
	SYNC
	MOVW	R3, ret+16(FP)
	RET

// func Or64(ptr *uint64, val uint64) old uint64
TEXT ·Or64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R1
	MOVV	val+8(FP), R2

	SYNC
	LLV	(R1), R3
	OR	R2, R3, R4
	SCV	R4, (R1)
	BEQ	R4, -3(PC)	// SCV failed; retry from the LLV
	SYNC
	MOVV	R3, ret+16(FP)
	RET

// func And64(ptr *uint64, val uint64) old uint64
TEXT ·And64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R1
	MOVV	val+8(FP), R2

	SYNC
	LLV	(R1), R3
	AND	R2, R3, R4
	SCV	R4, (R1)
	BEQ	R4, -3(PC)	// SCV failed; retry from the LLV
	SYNC
	MOVV	R3, ret+16(FP)
	RET

// func Anduintptr(ptr *uintptr, val uintptr) old uintptr
TEXT ·Anduintptr(SB), NOSPLIT, $0-24
	JMP	·And64(SB)

// func Oruintptr(ptr *uintptr, val uintptr) old uintptr
TEXT ·Oruintptr(SB), NOSPLIT, $0-24
	JMP	·Or64(SB)

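// Each load is bracketed by SYNC: the leading barrier orders it after
// prior accesses and the trailing one keeps later accesses from being
// hoisted above it.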
// uint32 ·Load(uint32 volatile* ptr)
TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12
	MOVV	ptr+0(FP), R1
	SYNC
	MOVWU	0(R1), R1
	SYNC
	MOVW	R1, ret+8(FP)
	RET

// uint8 ·Load8(uint8 volatile* ptr)
TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9
	MOVV	ptr+0(FP), R1
	SYNC
	MOVBU	0(R1), R1
	SYNC
	MOVB	R1, ret+8(FP)
	RET

// uint64 ·Load64(uint64 volatile* ptr)
TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16
	MOVV	ptr+0(FP), R1
	SYNC
	MOVV	0(R1), R1
	SYNC
	MOVV	R1, ret+8(FP)
	RET

// void *·Loadp(void *volatile *ptr)
TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16
	MOVV	ptr+0(FP), R1
	SYNC
	MOVV	0(R1), R1
	SYNC
	MOVV	R1, ret+8(FP)
	RET

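// The LoadAcq* variants need only load-acquire ordering, which the fully
// fenced loads above already satisfy, so they simply tail-jump to them.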
// uint32 ·LoadAcq(uint32 volatile* ptr)
TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12
	JMP	atomic·Load(SB)

// uint64 ·LoadAcq64(uint64 volatile* ptr)
TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16
	JMP	atomic·Load64(SB)

// uintptr ·LoadAcquintptr(uintptr volatile* ptr)
TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16
	JMP	atomic·Load64(SB)
