Source file src/sync/pool_test.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Pool is a no-op under the race detector, so none of these tests work.
//
//go:build !race

package sync_test

import (
	"runtime"
	"runtime/debug"
	"slices"
	. "sync"
	"sync/atomic"
	"testing"
	"time"
)

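// TestPool exercises basic Put and Get behavior on a single P and checks
// that pooled objects survive one GC in the victim cache but are dropped
// by a second GC.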
func TestPool(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var p Pool
	if p.Get() != nil {
		t.Fatal("expected empty")
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put("a")
	p.Put("b")
	if g := p.Get(); g != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	if g := p.Get(); g != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}
	Runtime_procUnpin()

	// Put in a large number of objects so they spill into
	// stealable space.
	for i := 0; i < 100; i++ {
		p.Put("c")
	}
	// After one GC, the victim cache should keep them alive.
	runtime.GC()
	if g := p.Get(); g != "c" {
		t.Fatalf("got %#v; want c after GC", g)
	}
	// A second GC should drop the victim cache.
	runtime.GC()
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil after second GC", g)
	}
}

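// TestPoolNew checks that Get calls New only when the pool has nothing
// cached, and otherwise returns a previously Put value.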
func TestPoolNew(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	i := 0
	p := Pool{
		New: func() any {
			i++
			return i
		},
	}
	if v := p.Get(); v != 1 {
		t.Fatalf("got %v; want 1", v)
	}
	if v := p.Get(); v != 2 {
		t.Fatalf("got %v; want 2", v)
	}

	// Make sure that the goroutine doesn't migrate to another P
	// between Put and Get calls.
	Runtime_procPin()
	p.Put(42)
	if v := p.Get(); v != 42 {
		t.Fatalf("got %v; want 42", v)
	}
	Runtime_procUnpin()

	if v := p.Get(); v != 3 {
		t.Fatalf("got %v; want 3", v)
	}
}

// Test that Pool does not hold pointers to previously cached resources.
func TestPoolGC(t *testing.T) {
	testPool(t, true)
}

// Test that Pool releases resources on GC.
func TestPoolRelease(t *testing.T) {
	testPool(t, false)
}

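// testPool puts N tracked objects into a Pool, optionally drains them back
// out with Get, and then checks that GC eventually runs the cleanup for all
// but at most one of them.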
func testPool(t *testing.T, drain bool) {
	var p Pool
	const N = 100
loop:
	for try := 0; try < 3; try++ {
		if try == 1 && testing.Short() {
			break
		}
		var cln, cln1 uint32
		for i := 0; i < N; i++ {
			v := new(string)
			runtime.AddCleanup(v, func(f *uint32) { atomic.AddUint32(f, 1) }, &cln)
			p.Put(v)
		}
		if drain {
			for i := 0; i < N; i++ {
				p.Get()
			}
		}
		for i := 0; i < 5; i++ {
			runtime.GC()
			time.Sleep(time.Duration(i*100+10) * time.Millisecond)
			// One pointer can remain on the stack or elsewhere.
			if cln1 = atomic.LoadUint32(&cln); cln1 >= N-1 {
				continue loop
			}
		}
		t.Fatalf("only %v out of %v resources are cleaned up on try %v", cln1, N, try)
	}
}

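// TestPoolStress has P goroutines concurrently Put and Get values on a
// shared Pool and fails if Get ever returns something other than 0 or nil.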
func TestPoolStress(t *testing.T) {
	const P = 10
	N := int(1e6)
	if testing.Short() {
		N /= 100
	}
	var p Pool
	done := make(chan bool)
	for i := 0; i < P; i++ {
		go func() {
			var v any = 0
			for j := 0; j < N; j++ {
				if v == nil {
					v = 0
				}
				p.Put(v)
				v = p.Get()
				if v != nil && v.(int) != 0 {
					t.Errorf("expect 0, got %v", v)
					break
				}
			}
			done <- true
		}()
	}
	for i := 0; i < P; i++ {
		<-done
	}
}

func TestPoolDequeue(t *testing.T) {
	testPoolDequeue(t, NewPoolDequeue(16))
}

func TestPoolChain(t *testing.T) {
	testPoolDequeue(t, NewPoolChain())
}

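// testPoolDequeue runs one producer, which pushes values 0..N-1 with PushHead
// and occasionally pops with PopHead, against P-1 consumers calling PopTail,
// and verifies that every value is popped exactly once.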
func testPoolDequeue(t *testing.T, d PoolDequeue) {
	const P = 10
	var N int = 2e6
	if testing.Short() {
		N = 1e3
	}
	have := make([]int32, N)
	var stop int32
	var wg WaitGroup
	record := func(val int) {
		atomic.AddInt32(&have[val], 1)
		if val == N-1 {
			atomic.StoreInt32(&stop, 1)
		}
	}

	// Start P-1 consumers.
	for i := 1; i < P; i++ {
		wg.Add(1)
		go func() {
			fail := 0
			for atomic.LoadInt32(&stop) == 0 {
				val, ok := d.PopTail()
				if ok {
					fail = 0
					record(val.(int))
				} else {
					// Speed up the test by
					// allowing the pusher to run.
					if fail++; fail%100 == 0 {
						runtime.Gosched()
					}
				}
			}
			wg.Done()
		}()
	}

	// Start 1 producer.
	nPopHead := 0
	wg.Add(1)
	go func() {
		for j := 0; j < N; j++ {
			for !d.PushHead(j) {
				// Allow a popper to run.
				runtime.Gosched()
			}
			if j%10 == 0 {
				val, ok := d.PopHead()
				if ok {
					nPopHead++
					record(val.(int))
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()

	// Check results.
	for i, count := range have {
		if count != 1 {
			t.Errorf("expected have[%d] = 1, got %d", i, count)
		}
	}
	// Check that at least some PopHeads succeeded. We skip this
	// check in short mode because it's common enough that the
	// queue will stay nearly empty all the time and a PopTail
	// will happen during the window between every PushHead and
	// PopHead.
	if !testing.Short() && nPopHead == 0 {
		t.Errorf("popHead never succeeded")
	}
}

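// TestNilPool verifies that both Get and Put panic when called on a nil *Pool.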
func TestNilPool(t *testing.T) {
	catch := func() {
		if recover() == nil {
			t.Error("expected panic")
		}
	}

	var p *Pool
	t.Run("Get", func(t *testing.T) {
		defer catch()
		if p.Get() != nil {
			t.Error("expected empty")
		}
		t.Error("should have panicked already")
	})
	t.Run("Put", func(t *testing.T) {
		defer catch()
		p.Put("a")
		t.Error("should have panicked already")
	})
}

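// BenchmarkPool measures a Put immediately followed by a Get, run in
// parallel across GOMAXPROCS goroutines.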
func BenchmarkPool(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			p.Put(1)
			p.Get()
		}
	})
}

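// BenchmarkPoolOverflow Puts 100 items per iteration before Getting them
// back, so items overflow the per-P private slot into the shared portion
// of the pool.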
func BenchmarkPoolOverflow(b *testing.B) {
	var p Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < 100; b++ {
				p.Put(1)
			}
			for b := 0; b < 100; b++ {
				p.Get()
			}
		}
	})
}

// Simulate object starvation in order to force Ps to steal objects
// from other Ps.
func BenchmarkPoolStarvation(b *testing.B) {
	var p Pool
	count := 100
	// Put 33% fewer objects than are retrieved. The resulting starvation
	// forces P-local storage to steal objects from other Ps.
	countStarved := count - int(float32(count)*0.33)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for b := 0; b < countStarved; b++ {
				p.Put(1)
			}
			for b := 0; b < count; b++ {
				p.Get()
			}
		}
	})
}

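// globalSink holds ballast allocated by benchmarks so the GC treats it as live.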
var globalSink any

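// BenchmarkPoolSTW fills a Pool with 100000 items before each forced GC and
// reports the resulting stop-the-world pause times (average ns/op, plus the
// p95 and p50 pauses).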
func BenchmarkPoolSTW(b *testing.B) {
	// Take control of GC.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var mstats runtime.MemStats
	var pauses []uint64

	var p Pool
	for i := 0; i < b.N; i++ {
		// Put a large number of items into a pool.
		const N = 100000
		var item any = 42
		for i := 0; i < N; i++ {
			p.Put(item)
		}
		// Do a GC.
		runtime.GC()
		// Record pause time.
		runtime.ReadMemStats(&mstats)
		pauses = append(pauses, mstats.PauseNs[(mstats.NumGC+255)%256])
	}

	// Get pause time stats.
	slices.Sort(pauses)
	var total uint64
	for _, ns := range pauses {
		total += ns
	}
	// ns/op for this benchmark is average STW time.
	b.ReportMetric(float64(total)/float64(b.N), "ns/op")
	b.ReportMetric(float64(pauses[len(pauses)*95/100]), "p95-ns/STW")
	b.ReportMetric(float64(pauses[len(pauses)*50/100]), "p50-ns/STW")
}

func BenchmarkPoolExpensiveNew(b *testing.B) {
	// Populate a pool with items that are expensive to construct
	// to stress pool cleanup and subsequent reconstruction.

	// Create a ballast so the GC has a non-zero heap size and
	// runs at reasonable times.
	globalSink = make([]byte, 8<<20)
	defer func() { globalSink = nil }()

	// Create a pool that's "expensive" to fill.
	var p Pool
	var nNew uint64
	p.New = func() any {
		atomic.AddUint64(&nNew, 1)
		time.Sleep(time.Millisecond)
		return 42
	}
	var mstats1, mstats2 runtime.MemStats
	runtime.ReadMemStats(&mstats1)
	b.RunParallel(func(pb *testing.PB) {
		// Simulate 100X the number of goroutines having items
		// checked out from the Pool simultaneously.
		items := make([]any, 100)
		var sink []byte
		for pb.Next() {
			// Stress the pool.
			for i := range items {
				items[i] = p.Get()
				// Simulate doing some work with this
				// item checked out.
				sink = make([]byte, 32<<10)
			}
			for i, v := range items {
				p.Put(v)
				items[i] = nil
			}
		}
		_ = sink
	})
	runtime.ReadMemStats(&mstats2)

	b.ReportMetric(float64(mstats2.NumGC-mstats1.NumGC)/float64(b.N), "GCs/op")
	b.ReportMetric(float64(nNew)/float64(b.N), "New/op")
}
