// Source: src/runtime/os_netbsd.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/goarch"
    10  	"internal/runtime/atomic"
    11  	"unsafe"
    12  )
    13  
const (
	// From NetBSD's <sys/signal.h>: sigaltstack ss_flags value that
	// disables the alternate signal stack (see netbsdMstart0).
	_SS_DISABLE = 4

	// "how" arguments for sigprocmask.
	_SIG_BLOCK   = 1
	_SIG_UNBLOCK = 2
	_SIG_SETMASK = 3

	// Highest signal number plus one.
	_NSIG = 33
	// siginfo si_code: signal sent by a user process.
	_SI_USER = 0

	// From NetBSD's <sys/ucontext.h>
	_UC_SIGMASK = 0x01
	_UC_CPU     = 0x04

	// From <sys/lwp.h>
	_LWP_DETACHED = 0x00000040
)

// mOS holds NetBSD-specific state embedded in each M (OS thread).
type mOS struct {
	// waitsemacount is the count of the per-thread semaphore built on
	// lwp_park/lwp_unpark; see semasleep and semawakeup.
	waitsemacount uint32
}
    33  
// The bodyless functions below are implemented in assembly
// (sys_netbsd_*.s) as thin wrappers around NetBSD system calls.

//go:noescape
func setitimer(mode int32, new, old *itimerval)

//go:noescape
func sigaction(sig uint32, new, old *sigactiont)

//go:noescape
func sigaltstack(new, old *stackt)

//go:noescape
func sigprocmask(how int32, new, old *sigset)

//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32

// lwp_tramp is the assembly entry trampoline for new LWPs; its PC is
// installed into the new thread's context (see newosproc).
func lwp_tramp()

func raiseproc(sig uint32)

func lwp_kill(tid int32, sig int)

//go:noescape
func getcontext(ctxt unsafe.Pointer)

//go:noescape
func lwp_create(ctxt unsafe.Pointer, flags uintptr, lwpid unsafe.Pointer) int32

//go:noescape
func lwp_park(clockid, flags int32, ts *timespec, unpark int32, hint, unparkhint unsafe.Pointer) int32

//go:noescape
func lwp_unpark(lwp int32, hint unsafe.Pointer) int32

func lwp_self() int32

func osyield()

// osyield_no_g is a variant of osyield that is safe to call without a g.
//
//go:nosplit
func osyield_no_g() {
	osyield()
}

func kqueue() int32

//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32

func pipe2(flags int32) (r, w int32, errno int32)
func fcntl(fd, cmd, arg int32) (ret int32, errno int32)

func issetugid() int32
    85  
const (
	// From NetBSD's <sys/errno.h>.
	_ESRCH     = 3
	_ETIMEDOUT = 60

	// From NetBSD's <sys/time.h>
	_CLOCK_REALTIME  = 0
	_CLOCK_VIRTUAL   = 1
	_CLOCK_PROF      = 2
	_CLOCK_MONOTONIC = 3

	// Timer flags for lwp_park: relative vs. absolute deadline.
	_TIMER_RELTIME = 0
	_TIMER_ABSTIME = 1
)

// sigset_all has every signal bit set; it is used to block all signals.
var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}}

// From NetBSD's <sys/sysctl.h>
const (
	_CTL_HW        = 6
	_HW_NCPU       = 3
	_HW_PAGESIZE   = 7
	_HW_NCPUONLINE = 16
)
   109  
   110  func sysctlInt(mib []uint32) (int32, bool) {
   111  	var out int32
   112  	nout := unsafe.Sizeof(out)
   113  	ret := sysctl(&mib[0], uint32(len(mib)), (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
   114  	if ret < 0 {
   115  		return 0, false
   116  	}
   117  	return out, true
   118  }
   119  
   120  func getCPUCount() int32 {
   121  	if n, ok := sysctlInt([]uint32{_CTL_HW, _HW_NCPUONLINE}); ok {
   122  		return int32(n)
   123  	}
   124  	if n, ok := sysctlInt([]uint32{_CTL_HW, _HW_NCPU}); ok {
   125  		return int32(n)
   126  	}
   127  	return 1
   128  }
   129  
   130  func getPageSize() uintptr {
   131  	mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
   132  	out := uint32(0)
   133  	nout := unsafe.Sizeof(out)
   134  	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
   135  	if ret >= 0 {
   136  		return uintptr(out)
   137  	}
   138  	return 0
   139  }
   140  
// semacreate is a no-op on NetBSD: the semaphore is simply the
// waitsemacount field already embedded in mOS.
//
//go:nosplit
func semacreate(mp *m) {
}
   144  
// semasleep waits for the current M's semaphore (m.waitsemacount) to
// become positive and decrements it. If ns < 0 it waits indefinitely;
// otherwise it waits at most ns nanoseconds. Returns 0 when the
// semaphore is acquired, -1 on timeout.
//
//go:nosplit
func semasleep(ns int64) int32 {
	gp := getg()
	var deadline int64
	if ns >= 0 {
		deadline = nanotime() + ns
	}

	for {
		// Fast path: try to take a unit of the count with CAS.
		// A failed CAS means another racer changed the count; retry.
		v := atomic.Load(&gp.m.waitsemacount)
		if v > 0 {
			if atomic.Cas(&gp.m.waitsemacount, v, v-1) {
				return 0 // semaphore acquired
			}
			continue
		}

		// Sleep until unparked by semawakeup or timeout.
		var tsp *timespec
		var ts timespec
		if ns >= 0 {
			// Recompute the remaining wait each iteration so
			// spurious wakeups don't extend the total timeout.
			wait := deadline - nanotime()
			if wait <= 0 {
				return -1
			}
			ts.setNsec(wait)
			tsp = &ts
		}
		ret := lwp_park(_CLOCK_MONOTONIC, _TIMER_RELTIME, tsp, 0, unsafe.Pointer(&gp.m.waitsemacount), nil)
		if ret == _ETIMEDOUT {
			return -1
		}
		// Any other return (e.g. an unpark that raced ahead of the
		// park) loops back to re-check the count.
	}
}
   179  
// semawakeup releases mp's semaphore: it increments waitsemacount and
// unparks mp's LWP if it is sleeping in semasleep. _ESRCH is tolerated
// because the target may not be parked at all.
//
//go:nosplit
func semawakeup(mp *m) {
	atomic.Xadd(&mp.waitsemacount, 1)
	// From NetBSD's _lwp_unpark(2) manual:
	// "If the target LWP is not currently waiting, it will return
	// immediately upon the next call to _lwp_park()."
	ret := lwp_unpark(int32(mp.procid), unsafe.Pointer(&mp.waitsemacount))
	if ret != 0 && ret != _ESRCH {
		// semawakeup can be called on signal stack.
		systemstack(func() {
			print("thrwakeup addr=", &mp.waitsemacount, " sem=", mp.waitsemacount, " ret=", ret, "\n")
		})
	}
}
   194  
// newosproc creates a new OS thread (LWP) that starts executing at
// netbsdMstart on mp.g0's stack, with all signals blocked until minit
// installs the real signal state.
//
// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrier
func newosproc(mp *m) {
	stk := unsafe.Pointer(mp.g0.stack.hi)
	if false {
		print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
	}

	// Seed the new thread's context from the current one.
	var uc ucontextt
	getcontext(unsafe.Pointer(&uc))

	// _UC_SIGMASK does not seem to work here.
	// It would be nice if _UC_SIGMASK and _UC_STACK
	// worked so that we could do all the work setting
	// the sigmask and the stack here, instead of setting
	// the mask here and the stack in netbsdMstart.
	// For now do the blocking manually.
	uc.uc_flags = _UC_SIGMASK | _UC_CPU
	uc.uc_link = nil
	uc.uc_sigmask = sigset_all

	// Block all signals in this thread so the child inherits a fully
	// blocked mask; restored below after lwp_create.
	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)

	// Point the context at the new stack and entry point.
	lwp_mcontext_init(&uc.uc_mcontext, stk, mp, mp.g0, abi.FuncPCABI0(netbsdMstart))

	ret := retryOnEAGAIN(func() int32 {
		errno := lwp_create(unsafe.Pointer(&uc), _LWP_DETACHED, unsafe.Pointer(&mp.procid))
		// lwp_create returns negative errno
		return -errno
	})
	sigprocmask(_SIG_SETMASK, &oset, nil)
	if ret != 0 {
		print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", ret, ")\n")
		if ret == _EAGAIN {
			println("runtime: may need to increase max user processes (ulimit -p)")
		}
		throw("runtime.newosproc")
	}
}
   236  
// mstart is the entry-point for new Ms.
// It is written in assembly, uses ABI0, is marked TOPFRAME, and calls netbsdMstart0.
func netbsdMstart()

// netbsdMstart0 is the function call that starts executing a newly
// created thread. On NetBSD, a new thread inherits the signal stack
// of the creating thread. That confuses minit, so we remove that
// signal stack here before calling the regular mstart. It's a bit
// baroque to remove a signal stack here only to add one in minit, but
// it's a simple change that keeps NetBSD working like other OS's.
// At this point all signals are blocked, so there is no race.
//
//go:nosplit
func netbsdMstart0() {
	// Disable the inherited alternate signal stack; minit installs ours.
	st := stackt{ss_flags: _SS_DISABLE}
	sigaltstack(&st, nil)
	mstart0()
}
   255  
   256  func osinit() {
   257  	numCPUStartup = getCPUCount()
   258  	if physPageSize == 0 {
   259  		physPageSize = getPageSize()
   260  	}
   261  }
   262  
   263  var urandom_dev = []byte("/dev/urandom\x00")
   264  
   265  //go:nosplit
   266  func readRandom(r []byte) int {
   267  	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
   268  	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
   269  	closefd(fd)
   270  	return int(n)
   271  }
   272  
// goenvs populates the environment using the generic Unix code path.
func goenvs() {
	goenvs_unix()
}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	// Allocate a 32 KB signal-handling goroutine stack for this m.
	mp.gsignal = malg(32 * 1024)
	mp.gsignal.m = mp
}
   283  
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	gp := getg()
	// Record the kernel thread id; semawakeup and signalM use it to
	// target this thread.
	gp.m.procid = uint64(lwp_self())

	// On NetBSD a thread created by pthread_create inherits the
	// signal stack of the creating thread. We always create a
	// new signal stack here, to avoid having two Go threads using
	// the same signal stack. This breaks the case of a thread
	// created in C that calls sigaltstack and then calls a Go
	// function, because we will lose track of the C code's
	// sigaltstack, but it's the best we can do.
	signalstack(&gp.m.gsignal.stack)
	gp.m.newSigstack = true

	minitSignalMask()
}
   302  
// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
	unminitSignals()
	// Don't clear procid, it is used by locking (semawake), and locking
	// must continue working after unminit.
}

// Called from mexit, but not from dropm, to undo the effect of thread-owned
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
//
//go:nowritebarrierrec
func mdestroy(mp *m) {
	// Nothing to release on NetBSD: semacreate allocates nothing and
	// minit's signal stack is handled by unminitSignals.
}
   320  
// sigtramp is the assembly signal-handler entry point installed by
// setsig; it transfers control to the Go signal handling code.
func sigtramp()

// sigactiont mirrors NetBSD's struct sigaction as passed to the
// sigaction system call.
type sigactiont struct {
	sa_sigaction uintptr
	sa_mask      sigset
	sa_flags     int32
}
   328  
// setsig installs fn as the handler for signal i, delivered on the
// alternate stack with all signals masked during handling. The Go
// sighandler is reached via the ABI0 assembly trampoline sigtramp.
//
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
	var sa sigactiont
	sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
	sa.sa_mask = sigset_all
	if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
		fn = abi.FuncPCABI0(sigtramp)
	}
	sa.sa_sigaction = fn
	sigaction(i, &sa, nil)
}
   341  
// setsigstack is never used on NetBSD (signal stacks are installed in
// minit); reaching it indicates a runtime bug.
//
//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
	throw("setsigstack")
}

// getsig returns the currently installed handler address for signal i.
//
//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
	var sa sigactiont
	sigaction(i, nil, &sa)
	return sa.sa_sigaction
}
   355  
// setSignalstackSP sets the ss_sp field of a stackt.
// On NetBSD ss_sp is a uintptr, so the assignment is direct.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
	s.ss_sp = sp
}
   362  
   363  //go:nosplit
   364  //go:nowritebarrierrec
   365  func sigaddset(mask *sigset, i int) {
   366  	mask.__bits[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
   367  }
   368  
   369  func sigdelset(mask *sigset, i int) {
   370  	mask.__bits[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
   371  }
   372  
// fixsigcode is a no-op on NetBSD; no si_code values need rewriting.
//
//go:nosplit
func (c *sigctxt) fixsigcode(sig uint32) {
}

// setProcessCPUProfiler configures the process-wide CPU profiling
// timer to fire hz times per second.
func setProcessCPUProfiler(hz int32) {
	setProcessCPUProfilerTimer(hz)
}

// setThreadCPUProfiler records the per-thread CPU profiling rate.
func setThreadCPUProfiler(hz int32) {
	setThreadCPUProfilerHz(hz)
}

// validSIGPROF reports whether the SIGPROF delivery described by c
// should be counted; on NetBSD every delivery is accepted.
//
//go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool {
	return true
}
   389  
// sysargs locates the ELF auxiliary vector, which the kernel places
// after the NULL-terminated argv and envp arrays, and harvests values
// from it via sysauxv.
func sysargs(argc int32, argv **byte) {
	n := argc + 1

	// skip over argv, envp to get to auxv
	for argv_index(argv, n) != nil {
		n++
	}

	// skip NULL separator
	n++

	// now argv+n is auxv
	// The huge array type is a device to view unbounded memory as a
	// slice; only pairs*2 entries are actually valid.
	auxvp := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
	pairs := sysauxv(auxvp[:])
	auxv = auxvp[: pairs*2 : pairs*2]
}

// Auxiliary vector tags (from NetBSD's <sys/exec_elf.h>).
const (
	_AT_NULL   = 0 // Terminates the vector
	_AT_PAGESZ = 6 // Page size in bytes
)
   411  
   412  func sysauxv(auxv []uintptr) (pairs int) {
   413  	var i int
   414  	for i = 0; auxv[i] != _AT_NULL; i += 2 {
   415  		tag, val := auxv[i], auxv[i+1]
   416  		switch tag {
   417  		case _AT_PAGESZ:
   418  			physPageSize = val
   419  		}
   420  	}
   421  	return i / 2
   422  }
   423  
// raise sends signal to the calling thread.
//
// It must be nosplit because it is used by the signal handler before
// it definitely has a Go stack.
//
//go:nosplit
func raise(sig uint32) {
	lwp_kill(lwp_self(), int(sig))
}

// signalM sends sig to mp's thread, identified by the LWP id stored in
// mp.procid (set in minit).
func signalM(mp *m, sig int) {
	lwp_kill(int32(mp.procid), sig)
}

// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
// number.
const sigPerThreadSyscall = 1 << 31

//go:nosplit
func runPerThreadSyscall() {
	throw("runPerThreadSyscall only valid on linux")
}
   446  

// (web viewer footer removed)