Source file src/runtime/os_netbsd.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"unsafe"
)

const (
	_SS_DISABLE  = 4
	_SIG_BLOCK   = 1
	_SIG_UNBLOCK = 2
	_SIG_SETMASK = 3
	_NSIG        = 33
	_SI_USER     = 0

	// From NetBSD's <sys/ucontext.h>
	_UC_SIGMASK = 0x01
	_UC_CPU     = 0x04

	// From <sys/lwp.h>
	_LWP_DETACHED = 0x00000040
)

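// mOS holds the NetBSD-specific per-m state. waitsemacount is the counter
// behind the per-m semaphore used by semacreate, semasleep and semawakeup
// below.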
type mOS struct {
	waitsemacount uint32
}

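// Most of the function declarations below have no Go bodies; they are
// implemented in per-architecture assembly (the sys_netbsd_*.s files) and
// wrap the corresponding NetBSD system calls.
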
//go:noescape
func setitimer(mode int32, new, old *itimerval)

//go:noescape
func sigaction(sig uint32, new, old *sigactiont)

//go:noescape
func sigaltstack(new, old *stackt)

//go:noescape
func sigprocmask(how int32, new, old *sigset)

//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32

func lwp_tramp()

func raiseproc(sig uint32)

func lwp_kill(tid int32, sig int)

//go:noescape
func getcontext(ctxt unsafe.Pointer)

//go:noescape
func lwp_create(ctxt unsafe.Pointer, flags uintptr, lwpid unsafe.Pointer) int32

//go:noescape
func lwp_park(clockid, flags int32, ts *timespec, unpark int32, hint, unparkhint unsafe.Pointer) int32

//go:noescape
func lwp_unpark(lwp int32, hint unsafe.Pointer) int32

func lwp_self() int32

func osyield()

//go:nosplit
func osyield_no_g() {
	osyield()
}

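// kqueue, kevent and pipe2 back the kqueue-based network poller; fcntl and
// issetugid are thin wrappers around the system calls of the same name.
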
func kqueue() int32

//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32

func pipe2(flags int32) (r, w int32, errno int32)
func fcntl(fd, cmd, arg int32) (ret int32, errno int32)

func issetugid() int32

const (
	_ESRCH     = 3
	_ETIMEDOUT = 60

	// From NetBSD's <sys/time.h>
	_CLOCK_REALTIME  = 0
	_CLOCK_VIRTUAL   = 1
	_CLOCK_PROF      = 2
	_CLOCK_MONOTONIC = 3

	_TIMER_RELTIME = 0
	_TIMER_ABSTIME = 1
)

var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}}

// From NetBSD's <sys/sysctl.h>
const (
	_CTL_KERN   = 1
	_KERN_OSREV = 3

	_CTL_HW        = 6
	_HW_NCPU       = 3
	_HW_PAGESIZE   = 7
	_HW_NCPUONLINE = 16
)

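// sysctlInt reads a single integer-valued kernel variable named by the MIB
// mib and reports whether the lookup succeeded. For example, getncpu below
// queries the number of online CPUs with
//
//	sysctlInt([]uint32{_CTL_HW, _HW_NCPUONLINE})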
func sysctlInt(mib []uint32) (int32, bool) {
	var out int32
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], uint32(len(mib)), (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret < 0 {
		return 0, false
	}
	return out, true
}

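// getncpu returns the number of CPUs available to the program, preferring
// hw.ncpuonline and falling back to hw.ncpu, or 1 if neither can be read.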
func getncpu() int32 {
	if n, ok := sysctlInt([]uint32{_CTL_HW, _HW_NCPUONLINE}); ok {
		return int32(n)
	}
	if n, ok := sysctlInt([]uint32{_CTL_HW, _HW_NCPU}); ok {
		return int32(n)
	}
	return 1
}

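// getPageSize returns the system page size (hw.pagesize), or 0 if it cannot
// be determined.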
func getPageSize() uintptr {
	mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
	out := uint32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret >= 0 {
		return uintptr(out)
	}
	return 0
}

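// getOSRev returns the kernel revision (kern.osrevision), for example
// 902000000 for NetBSD 9.2, or 0 if it cannot be read.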
func getOSRev() int {
	if osrev, ok := sysctlInt([]uint32{_CTL_KERN, _KERN_OSREV}); ok {
		return int(osrev)
	}
	return 0
}

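// semacreate has nothing to do on NetBSD: the per-m semaphore is just the
// waitsemacount field of mOS.
//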
//go:nosplit
func semacreate(mp *m) {
}

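// semasleep waits until the m's waitsemacount is positive and decrements it,
// returning 0. If ns >= 0 it gives up after ns nanoseconds and returns -1.
// The wait itself is an _lwp_park on the address of waitsemacount, which
// semawakeup pairs with an _lwp_unpark on the same address.
//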
//go:nosplit
func semasleep(ns int64) int32 {
	gp := getg()
	var deadline int64
	if ns >= 0 {
		deadline = nanotime() + ns
	}

	for {
		v := atomic.Load(&gp.m.waitsemacount)
		if v > 0 {
			if atomic.Cas(&gp.m.waitsemacount, v, v-1) {
				return 0 // semaphore acquired
			}
			continue
		}

		// Sleep until unparked by semawakeup or timeout.
		var tsp *timespec
		var ts timespec
		if ns >= 0 {
			wait := deadline - nanotime()
			if wait <= 0 {
				return -1
			}
			ts.setNsec(wait)
			tsp = &ts
		}
		ret := lwp_park(_CLOCK_MONOTONIC, _TIMER_RELTIME, tsp, 0, unsafe.Pointer(&gp.m.waitsemacount), nil)
		if ret == _ETIMEDOUT {
			return -1
		}
	}
}

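// semawakeup wakes a thread sleeping in semasleep on mp's semaphore by
// incrementing waitsemacount and unparking mp's LWP.
//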
//go:nosplit
func semawakeup(mp *m) {
	atomic.Xadd(&mp.waitsemacount, 1)
	// From NetBSD's _lwp_unpark(2) manual:
	// "If the target LWP is not currently waiting, it will return
	// immediately upon the next call to _lwp_park()."
	ret := lwp_unpark(int32(mp.procid), unsafe.Pointer(&mp.waitsemacount))
	if ret != 0 && ret != _ESRCH {
		// semawakeup can be called on signal stack.
		systemstack(func() {
			print("thrwakeup addr=", &mp.waitsemacount, " sem=", mp.waitsemacount, " ret=", ret, "\n")
		})
	}
}

// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrier
func newosproc(mp *m) {
	stk := unsafe.Pointer(mp.g0.stack.hi)
	if false {
		print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
	}

	var uc ucontextt
	getcontext(unsafe.Pointer(&uc))

	// _UC_SIGMASK does not seem to work here.
	// It would be nice if _UC_SIGMASK and _UC_STACK
	// worked so that we could do all the work setting
	// the sigmask and the stack here, instead of setting
	// the mask here and the stack in netbsdMstart.
	// For now do the blocking manually.
	uc.uc_flags = _UC_SIGMASK | _UC_CPU
	uc.uc_link = nil
	uc.uc_sigmask = sigset_all

	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)

	lwp_mcontext_init(&uc.uc_mcontext, stk, mp, mp.g0, abi.FuncPCABI0(netbsdMstart))

	ret := retryOnEAGAIN(func() int32 {
		errno := lwp_create(unsafe.Pointer(&uc), _LWP_DETACHED, unsafe.Pointer(&mp.procid))
		// lwp_create returns negative errno
		return -errno
	})
	sigprocmask(_SIG_SETMASK, &oset, nil)
	if ret != 0 {
		print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", ret, ")\n")
		if ret == _EAGAIN {
			println("runtime: may need to increase max user processes (ulimit -p)")
		}
		throw("runtime.newosproc")
	}
}

// mstart is the entry-point for new Ms.
// It is written in assembly, uses ABI0, is marked TOPFRAME, and calls netbsdMstart0.
func netbsdMstart()

// netbsdMstart0 is the function call that starts executing a newly
// created thread. On NetBSD, a new thread inherits the signal stack
// of the creating thread. That confuses minit, so we remove that
// signal stack here before calling the regular mstart. It's a bit
// baroque to remove a signal stack here only to add one in minit, but
// it's a simple change that keeps NetBSD working like other OS's.
// At this point all signals are blocked, so there is no race.
//
//go:nosplit
func netbsdMstart0() {
	st := stackt{ss_flags: _SS_DISABLE}
	sigaltstack(&st, nil)
	mstart0()
}

func osinit() {
	ncpu = getncpu()
	if physPageSize == 0 {
		physPageSize = getPageSize()
	}
	needSysmonWorkaround = getOSRev() < 902000000 // NetBSD 9.2
}

var urandom_dev = []byte("/dev/urandom\x00")

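// readRandom fills r with bytes read from /dev/urandom and returns the
// number of bytes read.
//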
//go:nosplit
func readRandom(r []byte) int {
	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
	closefd(fd)
	return int(n)
}

func goenvs() {
	goenvs_unix()
}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	mp.gsignal = malg(32 * 1024)
	mp.gsignal.m = mp
}

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	gp := getg()
	gp.m.procid = uint64(lwp_self())

	// On NetBSD a thread created by pthread_create inherits the
	// signal stack of the creating thread. We always create a
	// new signal stack here, to avoid having two Go threads using
	// the same signal stack. This breaks the case of a thread
	// created in C that calls sigaltstack and then calls a Go
	// function, because we will lose track of the C code's
	// sigaltstack, but it's the best we can do.
	signalstack(&gp.m.gsignal.stack)
	gp.m.newSigstack = true

	minitSignalMask()
}

// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
	unminitSignals()
	// Don't clear procid, it is used by locking (semawakeup), and locking
	// must continue working after unminit.
}

// Called from mexit, but not from dropm, to undo the effect of thread-owned
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
//
//go:nowritebarrierrec
func mdestroy(mp *m) {
}

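// sigtramp is the assembly entry point for signal handlers installed by
// setsig; it sets up a Go execution environment and calls the Go signal
// handling code.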
func sigtramp()

type sigactiont struct {
	sa_sigaction uintptr
	sa_mask      sigset
	sa_flags     int32
}

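// setsig installs fn as the handler for signal i, running on the signal
// stack with all signals blocked during delivery. Go's own handler is
// installed via its assembly wrapper sigtramp.
//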
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
	var sa sigactiont
	sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
	sa.sa_mask = sigset_all
	if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
		fn = abi.FuncPCABI0(sigtramp)
	}
	sa.sa_sigaction = fn
	sigaction(i, &sa, nil)
}

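// setsigstack should never be called on NetBSD; it throws to catch any
// unexpected use.
//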
//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
	throw("setsigstack")
}

//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
	var sa sigactiont
	sigaction(i, nil, &sa)
	return sa.sa_sigaction
}

// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
	s.ss_sp = sp
}

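// sigaddset and sigdelset manipulate a NetBSD signal mask, in which signal i
// (signals are numbered from 1) is bit (i-1)%32 of word (i-1)/32 of the
// __bits array.
//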
//go:nosplit
//go:nowritebarrierrec
func sigaddset(mask *sigset, i int) {
	mask.__bits[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
}

func sigdelset(mask *sigset, i int) {
	mask.__bits[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
}

//go:nosplit
func (c *sigctxt) fixsigcode(sig uint32) {
}

func setProcessCPUProfiler(hz int32) {
	setProcessCPUProfilerTimer(hz)
}

func setThreadCPUProfiler(hz int32) {
	setThreadCPUProfilerHz(hz)
}

//go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool {
	return true
}

func sysargs(argc int32, argv **byte) {
	n := argc + 1

	// skip over argv, envp to get to auxv
	for argv_index(argv, n) != nil {
		n++
	}

	// skip NULL separator
	n++

	// now argv+n is auxv
	auxvp := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
	pairs := sysauxv(auxvp[:])
	auxv = auxvp[: pairs*2 : pairs*2]
}

const (
	_AT_NULL   = 0 // Terminates the vector
	_AT_PAGESZ = 6 // Page size in bytes
)

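// sysauxv scans the auxiliary vector, a sequence of (tag, value) pairs
// terminated by _AT_NULL, recording the values the runtime cares about
// (currently only the page size). It returns the number of pairs scanned.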
func sysauxv(auxv []uintptr) (pairs int) {
	var i int
	for i = 0; auxv[i] != _AT_NULL; i += 2 {
		tag, val := auxv[i], auxv[i+1]
		switch tag {
		case _AT_PAGESZ:
			physPageSize = val
		}
	}
	return i / 2
}

// raise sends signal to the calling thread.
//
// It must be nosplit because it is used by the signal handler before
// it definitely has a Go stack.
//
//go:nosplit
func raise(sig uint32) {
	lwp_kill(lwp_self(), int(sig))
}

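// signalM sends sig to the thread running mp, identified by the LWP id
// stored in mp.procid.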
func signalM(mp *m, sig int) {
	lwp_kill(int32(mp.procid), sig)
}

// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
// number.
const sigPerThreadSyscall = 1 << 31

//go:nosplit
func runPerThreadSyscall() {
	throw("runPerThreadSyscall only valid on linux")
}