Source file test/codegen/atomics.go

// asmcheck

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// These tests check that atomic instructions without dynamic checks are
// generated for architectures that support them.

package codegen

import "sync/atomic"

type Counter struct {
	count int32
}

func (c *Counter) Increment() {
	// Check that ARM64 v8.0 has both the atomic instruction (LDADDALW) and a dynamic check
	// (for arm64HasATOMICS), while ARM64 v8.1 has only the atomic instruction and no dynamic check.
	// arm64/v8.0:"LDADDALW"
	// arm64/v8.1:"LDADDALW"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// amd64:"LOCK",-"CMPXCHG"
	atomic.AddInt32(&c.count, 1)
}
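
// A hypothetical companion sketch, not part of the upstream test: a Decrement
// method annotated the same way. The asmcheck patterns below simply mirror the
// Increment checks above and are assumptions, not verified compiler output.
func (c *Counter) Decrement() {
	// arm64/v8.0:"LDADDALW"
	// arm64/v8.1:"LDADDALW"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// amd64:"LOCK",-"CMPXCHG"
	atomic.AddInt32(&c.count, -1)
}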

func atomicLogical64(x *atomic.Uint64) uint64 {
	var r uint64

	// arm64/v8.0:"LDCLRALD"
	// arm64/v8.1:"LDCLRALD"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// On amd64, make sure we use LOCK+AND instead of CMPXCHG when we don't use the result.
	// amd64:"LOCK",-"CMPXCHGQ"
	x.And(11)
	// arm64/v8.0:"LDCLRALD"
	// arm64/v8.1:"LDCLRALD"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// amd64:"LOCK","CMPXCHGQ"
	r += x.And(22)

	// arm64/v8.0:"LDORALD"
	// arm64/v8.1:"LDORALD"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// On amd64, make sure we use LOCK+OR instead of CMPXCHG when we don't use the result.
	// amd64:"LOCK",-"CMPXCHGQ"
	x.Or(33)
	// arm64/v8.0:"LDORALD"
	// arm64/v8.1:"LDORALD"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// amd64:"LOCK","CMPXCHGQ"
	r += x.Or(44)

	return r
}
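
// A hypothetical variant, not in the upstream file: the package-level helpers
// added in Go 1.23 (atomic.AndUint64 and atomic.OrUint64) should behave like
// the method forms above. The asmcheck patterns here are assumptions mirroring
// the checks in atomicLogical64, not verified output.
func atomicLogicalFuncs64(p *uint64) uint64 {
	var r uint64

	// Result discarded: expect the plain atomic read-modify-write form.
	// arm64/v8.1:"LDCLRALD"
	// amd64:"LOCK",-"CMPXCHGQ"
	atomic.AndUint64(p, 11)

	// Result used: on amd64 this is expected to fall back to a CMPXCHGQ loop.
	// amd64:"LOCK","CMPXCHGQ"
	r += atomic.OrUint64(p, 44)

	return r
}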

func atomicLogical32(x *atomic.Uint32) uint32 {
	var r uint32

	// arm64/v8.0:"LDCLRALW"
	// arm64/v8.1:"LDCLRALW"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// On amd64, make sure we use LOCK+AND instead of CMPXCHG when we don't use the result.
	// amd64:"LOCK",-"CMPXCHGL"
	x.And(11)
	// arm64/v8.0:"LDCLRALW"
	// arm64/v8.1:"LDCLRALW"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// amd64:"LOCK","CMPXCHGL"
	r += x.And(22)

	// arm64/v8.0:"LDORALW"
	// arm64/v8.1:"LDORALW"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// On amd64, make sure we use LOCK+OR instead of CMPXCHG when we don't use the result.
	// amd64:"LOCK",-"CMPXCHGL"
	x.Or(33)
	// arm64/v8.0:"LDORALW"
	// arm64/v8.1:"LDORALW"
	// arm64/v8.0:".*arm64HasATOMICS"
	// arm64/v8.1:-".*arm64HasATOMICS"
	// amd64:"LOCK","CMPXCHGL"
	r += x.Or(44)

	return r
}
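
// A usage sketch, not part of the asmcheck test: the Counter type above used
// from ordinary code. It carries no asmcheck annotations; it only illustrates
// that these are the regular sync/atomic operations being checked.
func countItems(items []int) int32 {
	var c Counter
	for range items {
		c.Increment()
	}
	return atomic.LoadInt32(&c.count)
}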