Source file src/sync/mutex_test.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// GOMAXPROCS=10 go test

package sync_test

import (
	"fmt"
	"internal/testenv"
	"os"
	"os/exec"
	"runtime"
	"strings"
	. "sync"
	"testing"
	"time"
)

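// HammerSemaphore repeatedly acquires and releases the semaphore s,
// then signals completion on cdone.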
func HammerSemaphore(s *uint32, loops int, cdone chan bool) {
	for i := 0; i < loops; i++ {
		Runtime_Semacquire(s)
		Runtime_Semrelease(s, false, 0)
	}
	cdone <- true
}

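// TestSemaphore hammers a single runtime semaphore from ten goroutines
// to exercise acquire/release under contention.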
func TestSemaphore(t *testing.T) {
	s := new(uint32)
	*s = 1
	c := make(chan bool)
	for i := 0; i < 10; i++ {
		go HammerSemaphore(s, 1000, c)
	}
	for i := 0; i < 10; i++ {
		<-c
	}
}

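// BenchmarkUncontendedSemaphore measures semaphore acquire/release on a
// single goroutine; the channel is buffered so the completion send in
// HammerSemaphore does not block.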
func BenchmarkUncontendedSemaphore(b *testing.B) {
	s := new(uint32)
	*s = 1
	HammerSemaphore(s, b.N, make(chan bool, 2))
}

func BenchmarkContendedSemaphore(b *testing.B) {
	b.StopTimer()
	s := new(uint32)
	*s = 1
	c := make(chan bool)
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	b.StartTimer()

	go HammerSemaphore(s, b.N/2, c)
	go HammerSemaphore(s, b.N/2, c)
	<-c
	<-c
}

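// HammerMutex repeatedly locks and unlocks m, using TryLock on every third
// iteration, then signals completion on cdone.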
func HammerMutex(m *Mutex, loops int, cdone chan bool) {
	for i := 0; i < loops; i++ {
		if i%3 == 0 {
			if m.TryLock() {
				m.Unlock()
			}
			continue
		}
		m.Lock()
		m.Unlock()
	}
	cdone <- true
}

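// TestMutex checks TryLock against a locked and an unlocked mutex, then
// hammers a single mutex from ten goroutines with mutex profiling enabled.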
func TestMutex(t *testing.T) {
	if n := runtime.SetMutexProfileFraction(1); n != 0 {
		t.Logf("got mutexrate %d expected 0", n)
	}
	defer runtime.SetMutexProfileFraction(0)

	m := new(Mutex)

	m.Lock()
	if m.TryLock() {
		t.Fatalf("TryLock succeeded with mutex locked")
	}
	m.Unlock()
	if !m.TryLock() {
		t.Fatalf("TryLock failed with mutex unlocked")
	}
	m.Unlock()

	c := make(chan bool)
	for i := 0; i < 10; i++ {
		go HammerMutex(m, 1000, c)
	}
	for i := 0; i < 10; i++ {
		<-c
	}
}

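// misuseTests enumerates misuse scenarios that must trigger a fatal,
// unrecoverable error: unlocking a Mutex or RWMutex that is not locked,
// or mismatching Lock/RLock with Unlock/RUnlock.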
var misuseTests = []struct {
	name string
	f    func()
}{
	{
		"Mutex.Unlock",
		func() {
			var mu Mutex
			mu.Unlock()
		},
	},
	{
		"Mutex.Unlock2",
		func() {
			var mu Mutex
			mu.Lock()
			mu.Unlock()
			mu.Unlock()
		},
	},
	{
		"RWMutex.Unlock",
		func() {
			var mu RWMutex
			mu.Unlock()
		},
	},
	{
		"RWMutex.Unlock2",
		func() {
			var mu RWMutex
			mu.RLock()
			mu.Unlock()
		},
	},
	{
		"RWMutex.Unlock3",
		func() {
			var mu RWMutex
			mu.Lock()
			mu.Unlock()
			mu.Unlock()
		},
	},
	{
		"RWMutex.RUnlock",
		func() {
			var mu RWMutex
			mu.RUnlock()
		},
	},
	{
		"RWMutex.RUnlock2",
		func() {
			var mu RWMutex
			mu.Lock()
			mu.RUnlock()
		},
	},
	{
		"RWMutex.RUnlock3",
		func() {
			var mu RWMutex
			mu.RLock()
			mu.RUnlock()
			mu.RUnlock()
		},
	},
}

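// init intercepts the child-process invocation made by TestMutexMisuse:
// when the test binary is run as "<binary> TESTMISUSE <name>", it executes
// the named misuse function (under a recover, to show the resulting fatal
// error is not a recoverable panic) and exits instead of running the tests.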
func init() {
	if len(os.Args) == 3 && os.Args[1] == "TESTMISUSE" {
		for _, test := range misuseTests {
			if test.name == os.Args[2] {
				func() {
					defer func() { recover() }()
					test.f()
				}()
				fmt.Printf("test completed\n")
				os.Exit(0)
			}
		}
		fmt.Printf("unknown test\n")
		os.Exit(0)
	}
}

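// TestMutexMisuse re-executes the test binary as a child process for each
// misuse case (e.g. "<binary> TESTMISUSE Mutex.Unlock") and expects the child
// to die with a fatal error whose message mentions an unlocked lock.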
func TestMutexMisuse(t *testing.T) {
	for _, test := range misuseTests {
		out, err := exec.Command(testenv.Executable(t), "TESTMISUSE", test.name).CombinedOutput()
		if err == nil || !strings.Contains(string(out), "unlocked") {
			t.Errorf("%s: did not find failure with message about unlocked lock: %s\n%s\n", test.name, err, out)
		}
	}
}

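// TestMutexFairness checks that a goroutine doing brief lock/unlock cycles
// can still acquire the mutex within 10 seconds while another goroutine
// repeatedly holds it for 100µs at a time, i.e. waiters are not starved.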
func TestMutexFairness(t *testing.T) {
	var mu Mutex
	stop := make(chan bool)
	defer close(stop)
	go func() {
		for {
			mu.Lock()
			time.Sleep(100 * time.Microsecond)
			mu.Unlock()
			select {
			case <-stop:
				return
			default:
			}
		}
	}()
	done := make(chan bool, 1)
	go func() {
		for i := 0; i < 10; i++ {
			time.Sleep(100 * time.Microsecond)
			mu.Lock()
			mu.Unlock()
		}
		done <- true
	}()
	select {
	case <-done:
	case <-time.After(10 * time.Second):
		t.Fatalf("can't acquire Mutex in 10 seconds")
	}
}

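// BenchmarkMutexUncontended measures Lock/Unlock with no contention:
// each goroutine gets its own mutex, padded so that mutexes of different
// goroutines do not share a cache line.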
func BenchmarkMutexUncontended(b *testing.B) {
	type PaddedMutex struct {
		Mutex
		pad [128]uint8
	}
	b.RunParallel(func(pb *testing.PB) {
		var mu PaddedMutex
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
		}
	})
}

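// benchmarkMutex measures a shared, contended mutex. With slack, more
// goroutines than GOMAXPROCS are used; with work, each iteration also does
// some local computation outside the critical section.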
func benchmarkMutex(b *testing.B, slack, work bool) {
	var mu Mutex
	if slack {
		b.SetParallelism(10)
	}
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
			if work {
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
		}
		_ = foo
	})
}

func BenchmarkMutex(b *testing.B) {
	benchmarkMutex(b, false, false)
}

func BenchmarkMutexSlack(b *testing.B) {
	benchmarkMutex(b, true, false)
}

func BenchmarkMutexWork(b *testing.B) {
	benchmarkMutex(b, false, true)
}

func BenchmarkMutexWorkSlack(b *testing.B) {
	benchmarkMutex(b, true, true)
}

func BenchmarkMutexNoSpin(b *testing.B) {
	// This benchmark models a situation where spinning in the mutex should be
	// unprofitable, and confirms that spinning does no harm in that case.
	// To achieve this we create an excess of goroutines, most of which do local work.
	// These goroutines yield during the local work, so that switching from
	// a blocked goroutine to other goroutines is profitable.
	// In practice this benchmark still triggers some spinning in the mutex.
	var m Mutex
	var acc0, acc1 uint64
	b.SetParallelism(4)
	b.RunParallel(func(pb *testing.PB) {
		c := make(chan bool)
		var data [4 << 10]uint64
		for i := 0; pb.Next(); i++ {
			if i%4 == 0 {
				m.Lock()
				acc0 -= 100
				acc1 += 100
				m.Unlock()
			} else {
				for i := 0; i < len(data); i += 4 {
					data[i]++
				}
				// An elaborate way to do the equivalent of runtime.Gosched
				// without putting the goroutine onto the global run queue.
				go func() {
					c <- true
				}()
				<-c
			}
		}
	})
}

func BenchmarkMutexSpin(b *testing.B) {
	// This benchmark models a situation where spinning in the mutex should be
	// profitable. To achieve this we create one goroutine per proc.
	// These goroutines access a considerable amount of local data, so that
	// unnecessary rescheduling is penalized by cache misses.
	var m Mutex
	var acc0, acc1 uint64
	b.RunParallel(func(pb *testing.PB) {
		var data [16 << 10]uint64
		for i := 0; pb.Next(); i++ {
			m.Lock()
			acc0 -= 100
			acc1 += 100
			m.Unlock()
			for i := 0; i < len(data); i += 4 {
				data[i]++
			}
		}
	})
}
