src/runtime/export_test.go

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"internal/runtime/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var ReadRandomFailed = &readRandomFailed

var Fastlog2 = fastlog2

var ParseByteCount = parseByteCount

var Nanotime = nanotime
var Cputicks = cputicks
var CyclesPerSecond = pprof_cyclesPerSecond
var NetpollBreak = netpollBreak
var Usleep = usleep

var PhysPageSize = physPageSize
var PhysHugePageSize = physHugePageSize

var NetpollGenericInit = netpollGenericInit

var Memmove = memmove
var MemclrNoHeapPointers = memclrNoHeapPointers

var CgoCheckPointer = cgoCheckPointer

const CrashStackImplemented = crashStackImplemented

const TracebackInnerFrames = tracebackInnerFrames
const TracebackOuterFrames = tracebackOuterFrames

var MapKeys = keys
var MapValues = values

var LockPartialOrder = lockPartialOrder

type TimeTimer = timeTimer

type LockRank lockRank

func (l LockRank) String() string {
	return lockRank(l).String()
}

const PreemptMSupported = preemptMSupported

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)((*lfstack)(head).pop())
}

func LFNodeValidate(node *LFNode) {
	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
}
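
// Editor's sketch (not in the original file): how runtime_test can drive the
// lock-free stack through these wrappers. Nodes must live outside the Go heap
// (e.g. obtained via PersistentAlloc, exported below), which LFNodeValidate
// verifies:
//
//	var head uint64
//	n := (*runtime.LFNode)(runtime.PersistentAlloc(unsafe.Sizeof(runtime.LFNode{}), 0))
//	runtime.LFNodeValidate(n)
//	runtime.LFStackPush(&head, n)
//	popped := runtime.LFStackPop(&head) // returns n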

func Netpoll(delta int64) {
	systemstack(func() {
		netpoll(delta)
	})
}

func PointerMask(x any) (ret []byte) {
	systemstack(func() {
		ret = pointerMask(x)
	})
	return
}

func RunSchedLocalQueueTest() {
	pp := new(p)
	gs := make([]g, len(pp.runq))
	Escape(gs) // Ensure gs doesn't move; the runq stores guintptrs.
	for i := 0; i < len(pp.runq); i++ {
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(pp, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(pp); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	Escape(gs)
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty: a concurrent
	// observer racing with a put/get pair must never see the queue as
	// empty while an element is logically present. Spurious emptiness
	// can lead to scheduler underutilization.
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	Escape(gs)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}
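
// Editor's sketch (not in the original file): how runtime_test exercises the
// exports above; StringHash takes a seed, and a good hash must disperse it:
//
//	h0 := runtime.StringHash("hello", 0)
//	h1 := runtime.StringHash("hello", 1) // different seed, expect a different hash
//	b := []byte{1, 2, 3}
//	runtime.MemclrBytes(b) // b is now {0, 0, 0}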

const HashLoad = hashLoad

// entry point for testing
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

const PtrSize = goarch.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g. from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stw := stopTheWorld(stwForTestCountPagesInUse)

	pagesInUse = mheap_.pagesInUse.Load()

	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld(stw)

	return
}

func Fastrand() uint32          { return uint32(rand()) }
func Fastrand64() uint64        { return rand() }
func Fastrandn(n uint32) uint32 { return randn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(mode)
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}
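
// Editor's sketch (not in the original file): a profBuf round-trip as the
// profbuf tests use it. now and pc below are placeholders for a timestamp
// and a program counter:
//
//	b := runtime.NewProfBuf(2, 1000, 1000) // 2 header words per record
//	var tag unsafe.Pointer
//	b.Write(&tag, now, []uint64{1, 2}, []uintptr{pc})
//	data, tags, eof := b.Read(runtime.ProfBufNonBlocking)
//	b.Close() // after Close, a final Read drains and reports eof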

type CPUStats = cpuStats

func ReadCPUStats() CPUStats {
	return work.cpuStats
}

func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
	stw := stopTheWorld(stwForTestReadMetricsSlow)

	// Initialize the metrics beforehand because this could
	// allocate and skew the stats.
	metricsLock()
	initMetrics()

	systemstack(func() {
		// Donate the racectx to g0. readMetricsLocked calls into the race
		// detector via map access.
		getg().racectx = getg().m.curg.racectx

		// Read the metrics once before, in case it allocates and skews the
		// metrics. readMetricsLocked is designed to only allocate the first
		// time it is called with a given slice of samples, so this extra
		// read also checks that the second read below won't allocate.
		readMetricsLocked(samplesp, len, cap)

		// Read memstats first. It's going to flush the mcaches, which
		// readMetricsLocked does not do, so going the other way around
		// may result in inconsistent statistics.
		readmemstats_m(memStats)

		// Read the metrics again. We need to be on the system stack with
		// readmemstats_m so that we don't call into the stack allocator
		// and adjust metrics between there and here.
		readMetricsLocked(samplesp, len, cap)

		// Undo the donation.
		getg().racectx = 0
	})
	metricsUnlock()

	startTheWorld(stw)
}

var DoubleCheckReadMemStats = &doubleCheckReadMemStats

// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stw := stopTheWorld(stwForTestReadMemStatsSlow)

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		slow.HeapReleased = 0
		var bySize [gc.NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state.get() != mSpanInUse {
				continue
			}
			if s.isUnusedUserArenaChunk() {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees by reading the accumulated heap stats directly.
		var m heapStatsDelta
		memstats.heapStats.unsafeRead(&m)

		// Collect per-sizeclass free stats.
		var smallFree uint64
		for i := 0; i < gc.NumSizeClasses; i++ {
			slow.Frees += m.smallFreeCount[i]
			bySize[i].Frees += m.smallFreeCount[i]
			bySize[i].Mallocs += m.smallFreeCount[i]
			smallFree += m.smallFreeCount[i] * uint64(gc.SizeClassToSize[i])
		}
		slow.Frees += m.tinyAllocCount + m.largeFreeCount
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
			slow.HeapReleased += uint64(pg) * pageSize
		}
		for _, p := range allp {
			// Add up pages that are scavenged but still held in
			// per-P page caches.
			pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav)
			slow.HeapReleased += uint64(pg) * pageSize
		}

		getg().m.mallocing--
	})

	startTheWorld(stw)
	return
}
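
// Editor's sketch (not in the original file): the consistency test in
// runtime_test compares the two views field by field, roughly:
//
//	base, slow := runtime.ReadMemStatsSlow()
//	if base.Alloc != slow.Alloc { /* report inconsistency */ }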

// ShrinkStackAndVerifyFramePointers shrinks the current goroutine's stack
// (with stack poisoning enabled) and then walks the frame pointer chain;
// stale frame pointers into the old, poisoned stack would crash the walk.
func ShrinkStackAndVerifyFramePointers() {
	before := stackPoisonCopy
	defer func() { stackPoisonCopy = before }()
	stackPoisonCopy = 1

	gp := getg()
	systemstack(func() {
		shrinkstack(gp)
	})

	// If our new stack contains frame pointers into the old stack, this
	// will crash because the old stack has been poisoned.
	FPCallers(make([]uintptr, 1024))
}

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) Init() {
	rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}
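
// Editor's sketch (not in the original file): typical use from runtime_test;
// Init must be called before first use so the lock ranks are set:
//
//	var rw runtime.RWMutex
//	rw.Init()
//	rw.RLock()
//	rw.RUnlock()
//	rw.Lock()
//	rw.Unlock()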

func LockOSCounts() (external, internal uint32) {
	gp := getg()
	if gp.m.lockedExt+gp.m.lockedInt == 0 {
		if gp.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if gp.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return gp.m.lockedExt, gp.m.lockedInt
}

// TracebackSystemstack captures a traceback after i nested systemstack calls.
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := sys.GetCallerPC(), sys.GetCallerSP()
		var u unwinder
		u.initAt(pc, sp, 0, getg(), unwindJumpStack)
		return tracebackPCs(&u, 0, stk)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}

// MapNextArenaHint reserves a page at the next arena growth hint,
// preventing the arena from growing there, and returns the range of
// addresses that are no longer viable.
//
// This may fail to reserve memory. If it fails, it still returns the
// address range it attempted to reserve.
func MapNextArenaHint() (start, end uintptr, ok bool) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	got := sysReserve(unsafe.Pointer(addr), physPageSize, "")
	ok = (addr == uintptr(got))
	if !ok {
		// We were unable to get the requested reservation.
		// Release what we did get and fail.
		sysFreeOS(got, physPageSize)
	}
	return
}

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

type G = g

type Sudog = sudog

func Getg() *G {
	return getg()
}

func Goid() uint64 {
	return getg().goid
}

func GIsWaitingOnMutex(gp *G) bool {
	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
}

var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack

//go:noinline
func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

//go:noinline
func unexportedPanicForTesting(b []byte, i int) byte {
	return b[i]
}

func G0StackOverflow() {
	systemstack(func() {
		g0 := getg()
		sp := sys.GetCallerSP()
		// The stack bounds for g0 stack are not always precise.
		// Use an artificially small stack, to trigger a stack overflow
		// without actually running out of the system stack (which may
		// seg fault).
		g0.stack.lo = sp - 4096 - stackSystem
		g0.stackguard0 = g0.stack.lo + stackGuard
		g0.stackguard1 = g0.stackguard0

		stackOverflow(nil)
	})
}

func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}

func RunGetgThreadSwitchTest() {
	// Test that getg works correctly with thread switch.
	// With gccgo, if we generate getg inlined, the backend
	// may cache the address of the TLS variable, which
	// will become invalid after a thread switch. This test
	// checks that the bad caching doesn't occur.
	ch := make(chan int)
	go func(ch chan int) {
		ch <- 5
		LockOSThread()
	}(ch)

	g1 := getg()

	// Block on a receive. This is likely to get us a thread
	// switch. If we yield to the sender goroutine, it will
	// lock the thread, forcing us to resume on a different
	// thread.
	<-ch

	g2 := getg()
	if g1 != g2 {
		panic("g1 != g2")
	}

	// Also test getg after some control flow, as the
	// backend is sensitive to control flow.
	g3 := getg()
	if g1 != g3 {
		panic("g1 != g3")
	}
}

const (
	PageSize         = pageSize
	PallocChunkPages = pallocChunkPages
	PageAlloc64Bit   = pageAlloc64Bit
	PallocSumBytes   = pallocSumBytes
)

// Expose pallocSum for testing.
type PallocSum pallocSum

func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
func (m PallocSum) End() uint                      { return pallocSum(m).end() }
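
// Editor's sketch (not in the original file): PackPallocSum packs the three
// run counts into a single word and the accessors recover them:
//
//	s := runtime.PackPallocSum(1, 2, 3)
//	_, _, _ = s.Start(), s.Max(), s.End() // 1, 2, 3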

// Expose pallocBits for testing.
type PallocBits pallocBits

func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
	return (*pallocBits)(b).find(npages, searchIdx)
}
func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }

// SummarizeSlow is a slow but more obviously correct implementation
// of (*pallocBits).summarize. Used for testing.
func SummarizeSlow(b *PallocBits) PallocSum {
	var start, most, end uint

	const N = uint(len(b)) * 64
	for start < N && (*pageBits)(b).get(start) == 0 {
		start++
	}
	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
		end++
	}
	run := uint(0)
	for i := uint(0); i < N; i++ {
		if (*pageBits)(b).get(i) == 0 {
			run++
		} else {
			run = 0
		}
		most = max(most, run)
	}
	return PackPallocSum(start, most, end)
}

// Expose findBitRange64 for testing.
func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }

// DiffPallocBits, given two PallocBits, returns the set of bit ranges
// where they differ.
func DiffPallocBits(a, b *PallocBits) []BitRange {
	ba := (*pageBits)(a)
	bb := (*pageBits)(b)

	var d []BitRange
	base, size := uint(0), uint(0)
	for i := uint(0); i < uint(len(ba))*64; i++ {
		if ba.get(i) != bb.get(i) {
			if size == 0 {
				base = i
			}
			size++
		} else {
			if size != 0 {
				d = append(d, BitRange{base, size})
			}
			size = 0
		}
	}
	if size != 0 {
		d = append(d, BitRange{base, size})
	}
	return d
}

// StringifyPallocBits gets the bits in the bit range r from b, and
// returns a string containing the bits as ASCII 0 and 1 characters.
func StringifyPallocBits(b *PallocBits, r BitRange) string {
	str := ""
	for j := r.I; j < r.I+r.N; j++ {
		if (*pageBits)(b).get(j) != 0 {
			str += "1"
		} else {
			str += "0"
		}
	}
	return str
}

// Expose pallocData for testing.
type PallocData pallocData

func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
}
func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
func (d *PallocData) ScavengedSetRange(i, n uint) {
	(*pallocData)(d).scavenged.setRange(i, n)
}
func (d *PallocData) PallocBits() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).pallocBits)
}
func (d *PallocData) Scavenged() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).scavenged)
}

// Expose fillAligned for testing.
func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }

// Expose pageCache for testing.
type PageCache pageCache

const PageCachePages = pageCachePages

func NewPageCache(base uintptr, cache, scav uint64) PageCache {
	return PageCache(pageCache{base: base, cache: cache, scav: scav})
}
func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageCache)(c).alloc(npages)
}
func (c *PageCache) Flush(s *PageAlloc) {
	cp := (*pageCache)(c)
	sp := (*pageAlloc)(s)

	systemstack(func() {
		// The tests don't take the heap lock themselves,
		// so take it here on their behalf.
		lock(sp.mheapLock)
		cp.flush(sp)
		unlock(sp.mheapLock)
	})
}
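
// Editor's sketch (not in the original file): a PageCache hands out pages
// from one 64-page aligned block without taking locks:
//
//	base := runtime.PageBase(runtime.BaseChunkIdx, 0)
//	c := runtime.NewPageCache(base, ^uint64(0), 0) // all 64 pages free, none scavenged
//	addr, scav := c.Alloc(1)                       // addr == base, scav == 0
//	empty := c.Empty()                             // false until the cache is exhausted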

// Expose chunk index type.
type ChunkIdx chunkIdx

// Expose pageAlloc for testing. Note that because pageAlloc is
// not in the heap, so is PageAlloc.
type PageAlloc pageAlloc

func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
	pp := (*pageAlloc)(p)

	var addr, scav uintptr
	systemstack(func() {
		// The tests don't take the heap lock themselves,
		// so take it here on their behalf.
		lock(pp.mheapLock)
		addr, scav = pp.alloc(npages)
		unlock(pp.mheapLock)
	})
	return addr, scav
}
func (p *PageAlloc) AllocToCache() PageCache {
	pp := (*pageAlloc)(p)

	var c PageCache
	systemstack(func() {
		// The tests don't take the heap lock themselves,
		// so take it here on their behalf.
		lock(pp.mheapLock)
		c = PageCache(pp.allocToCache())
		unlock(pp.mheapLock)
	})
	return c
}
func (p *PageAlloc) Free(base, npages uintptr) {
	pp := (*pageAlloc)(p)

	systemstack(func() {
		// The tests don't take the heap lock themselves,
		// so take it here on their behalf.
		lock(pp.mheapLock)
		pp.free(base, npages)
		unlock(pp.mheapLock)
	})
}
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
	pp := (*pageAlloc)(p)
	systemstack(func() {
		r = pp.scavenge(nbytes, nil, true)
	})
	return
}
func (p *PageAlloc) InUse() []AddrRange {
	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
	for _, r := range p.inUse.ranges {
		ranges = append(ranges, AddrRange{r})
	}
	return ranges
}

// Returns nil if the PallocData's L2 is missing.
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
	ci := chunkIdx(i)
	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
}

// AddrRange is a wrapper around addrRange for testing.
type AddrRange struct {
	addrRange
}

// MakeAddrRange creates a new address range.
func MakeAddrRange(base, limit uintptr) AddrRange {
	return AddrRange{makeAddrRange(base, limit)}
}

// Base returns the virtual base address of the address range.
func (a AddrRange) Base() uintptr {
	return a.addrRange.base.addr()
}

// Limit returns the virtual address of the limit of the address range.
func (a AddrRange) Limit() uintptr {
	return a.addrRange.limit.addr()
}

// Equals returns true if the two address ranges are exactly equal.
func (a AddrRange) Equals(b AddrRange) bool {
	return a == b
}

// Size returns the size of the address range in bytes.
func (a AddrRange) Size() uintptr {
	return a.addrRange.size()
}

// testSysStat is the sysStat passed to test versions of various
// runtime structures. We do actually have to keep track of this
// because otherwise memstats.mappedReady won't actually line up
// with other stats in the runtime during tests.
var testSysStat = &memstats.other_sys

// AddrRanges is a wrapper around addrRanges for testing.
type AddrRanges struct {
	addrRanges
	mutable bool
}

// NewAddrRanges creates a new empty addrRanges.
//
// Note that this initializes addrRanges just like in the
// runtime, so its memory is persistentalloc'd. Call this
// function sparingly since the memory it allocates is
// leaked.
//
// This AddrRanges is mutable, so we can test methods like
// Add.
func NewAddrRanges() AddrRanges {
	r := addrRanges{}
	r.init(testSysStat)
	return AddrRanges{r, true}
}

// MakeAddrRanges creates a new addrRanges populated with
// the ranges in a.
//
// The returned AddrRanges is immutable, so methods like
// Add will fail.
func MakeAddrRanges(a ...AddrRange) AddrRanges {
	// Methods that manipulate the backing store of addrRanges.ranges should
	// not be used on the result from this function (e.g. add) since they may
	// trigger reallocation. That would normally be fine, except the new
	// backing store won't come from the heap, but from persistentalloc, so
	// we'll leak the memory implicitly.
	ranges := make([]addrRange, 0, len(a))
	total := uintptr(0)
	for _, r := range a {
		ranges = append(ranges, r.addrRange)
		total += r.Size()
	}
	return AddrRanges{addrRanges{
		ranges:     ranges,
		totalBytes: total,
		sysStat:    testSysStat,
	}, false}
}

// Ranges returns a copy of the ranges described by the
// addrRanges.
func (a *AddrRanges) Ranges() []AddrRange {
	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
	for _, r := range a.addrRanges.ranges {
		result = append(result, AddrRange{r})
	}
	return result
}

// FindSucc returns the successor to base. See addrRanges.findSucc
// for more details.
func (a *AddrRanges) FindSucc(base uintptr) int {
	return a.findSucc(base)
}

// Add adds a new AddrRange to the AddrRanges.
//
// The AddrRange must be mutable (i.e. created by NewAddrRanges),
// otherwise this method will throw.
func (a *AddrRanges) Add(r AddrRange) {
	if !a.mutable {
		throw("attempt to mutate immutable AddrRanges")
	}
	a.add(r.addrRange)
}

// TotalBytes returns the totalBytes field of the addrRanges.
func (a *AddrRanges) TotalBytes() uintptr {
	return a.addrRanges.totalBytes
}

// BitRange represents a range over a bitmap.
type BitRange struct {
	I, N uint // bit index and length in bits
}

// NewPageAlloc creates a new page allocator for testing and
// initializes it with the scav and chunks maps. Each key in chunks
// represents a chunk index and each value is a series of bit ranges to
// set within each bitmap's chunk.
//
// The initialization of the pageAlloc preserves the invariant that if a
// scavenged bit is set the alloc bit is necessarily unset, so some
// of the bits described by scav may be cleared in the final bitmap if
// ranges in chunks overlap with them.
//
// scav is optional, and if nil, the scavenged bitmap will be cleared
// (as opposed to all 1s, which it usually is). Furthermore, every
// chunk index in scav must appear in chunks; ones that do not are
// ignored.
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
	p := new(pageAlloc)

	// We've got an entry, so initialize the pageAlloc.
	p.init(new(mutex), testSysStat, true)
	lockInit(p.mheapLock, lockRankMheap)
	for i, init := range chunks {
		addr := chunkBase(chunkIdx(i))

		// Mark the chunk's existence in the pageAlloc.
		systemstack(func() {
			lock(p.mheapLock)
			p.grow(addr, pallocChunkBytes)
			unlock(p.mheapLock)
		})

		// Initialize the bitmap and update pageAlloc metadata.
		ci := chunkIndex(addr)
		chunk := p.chunkOf(ci)

		// Clear all the scavenged bits which grow sets.
		chunk.scavenged.clearRange(0, pallocChunkPages)

		// Simulate an allocation and subsequent free of all pages in
		// the chunk for the scavenge index. This sets the state equivalent
		// with all pages within the index being free.
		p.scav.index.alloc(ci, pallocChunkPages)
		p.scav.index.free(ci, 0, pallocChunkPages)

		// Apply scavenge state if applicable.
		if scav != nil {
			if scvg, ok := scav[i]; ok {
				for _, s := range scvg {
					// Ignore the case of s.N == 0. setRange doesn't handle
					// it and it's a no-op anyway.
					if s.N != 0 {
						chunk.scavenged.setRange(s.I, s.N)
					}
				}
			}
		}

		// Apply alloc state.
		for _, s := range init {
			// Ignore the case of s.N == 0. allocRange doesn't handle
			// it and it's a no-op anyway.
			if s.N != 0 {
				chunk.allocRange(s.I, s.N)

				// Make sure the scavenge index is updated.
				p.scav.index.alloc(ci, s.N)
			}
		}

		// Update heap metadata for the allocRange calls above.
		systemstack(func() {
			lock(p.mheapLock)
			p.update(addr, pallocChunkPages, false, false)
			unlock(p.mheapLock)
		})
	}

	return (*PageAlloc)(p)
}
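
// Editor's sketch (not in the original file): a test builds a one-chunk
// allocator with its first 4 pages allocated, then allocates from it:
//
//	b := runtime.NewPageAlloc(map[runtime.ChunkIdx][]runtime.BitRange{
//		runtime.BaseChunkIdx: {{0, 4}},
//	}, nil)
//	defer runtime.FreePageAlloc(b)
//	addr, _ := b.Alloc(1) // first free page after the allocated prefix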

// FreePageAlloc releases hard OS-level resources owned by the pageAlloc. Once this
// happens, the pageAlloc may no longer be used. It is like calling the pageAlloc's
// destructor, if it had one.
func FreePageAlloc(pp *PageAlloc) {
	p := (*pageAlloc)(pp)

	// Free all the mapped space for the summary levels.
	if pageAlloc64Bit != 0 {
		for l := 0; l < summaryLevels; l++ {
			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
		}
	} else {
		resSize := uintptr(0)
		for _, s := range p.summary {
			resSize += uintptr(cap(s)) * pallocSumBytes
		}
		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
	}

	// Free the mapped space for the scavenge index.
	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))

	// Subtract back out whatever we mapped for the summaries.
	// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
	// (and in anger should actually be accounted for), and there's no other
	// way to figure out how much we actually mapped.
	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
	testSysStat.add(-int64(p.summaryMappedReady))

	// Free the mapped space for chunks.
	for i := range p.chunks {
		if x := p.chunks[i]; x != nil {
			p.chunks[i] = nil
			// This memory comes from sysAlloc and will always be page-aligned.
			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
		}
	}
}

// BaseChunkIdx is a convenient chunkIdx to use in tests. It's far
// enough from address 0 that it won't collide with the runtime's own
// heap on any supported platform.
var BaseChunkIdx = func() ChunkIdx {
	var prefix uintptr
	if pageAlloc64Bit != 0 {
		prefix = 0xc000
	} else {
		prefix = 0x100
	}
	baseAddr := prefix * pallocChunkBytes
	if goos.IsAix != 0 {
		baseAddr += arenaBaseOffset
	}
	return ChunkIdx(chunkIndex(baseAddr))
}()

// PageBase returns an address given a chunk index and a page index
// relative to that chunk.
func PageBase(c ChunkIdx, pageIdx uint) uintptr {
	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
}

type BitsMismatch struct {
	Base      uintptr
	Got, Want uint64
}

func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
	ok = true

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		getg().m.mallocing++

		// Lock so that we can read the bitmap safely.
		lock(&mheap_.lock)

		heapBase := mheap_.pages.inUse.ranges[0].base.addr()
		secondArenaBase := arenaBase(arenaIndex(heapBase) + 1)
	chunkLoop:
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			cb := chunkBase(i)
			for j := 0; j < pallocChunkPages/64; j++ {
				// Check that each allocated page has its scavenged
				// bit cleared: want masks out any scavenged bits
				// overlapping allocated pages, so a difference from
				// got means some allocated page is still marked
				// scavenged.
				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
				got := chunk.scavenged[j]
				if want != got {
					// With a randomized heap base
					// (goexperiment.RandomizedHeapBase64), mismatches
					// within the heap's first arena are expected, so
					// tolerate them there and move on.
					if goexperiment.RandomizedHeapBase64 && (cb >= heapBase && cb < secondArenaBase) {
						continue
					}
					ok = false
					if n >= len(mismatches) {
						break chunkLoop
					}
					mismatches[n] = BitsMismatch{
						Base: cb + uintptr(j)*64*pageSize,
						Got:  got,
						Want: want,
					}
					n++
				}
			}
		}
		unlock(&mheap_.lock)

		getg().m.mallocing--
	})
	return
}

func PageCachePagesLeaked() (leaked uintptr) {
	stw := stopTheWorld(stwForTestPageCachePagesLeaked)

	// Walk over destroyed Ps and look for unflushed caches.
	deadp := allp[len(allp):cap(allp)]
	for _, p := range deadp {
		// Since we're going past len(allp) we may see nil Ps.
		// Just ignore them.
		if p != nil {
			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
		}
	}

	startTheWorld(stw)
	return
}

var ProcYield = procyield
var OSYield = osyield

type Mutex = mutex

var Lock = lock
var Unlock = unlock

var MutexContended = mutexContended

func SemRootLock(addr *uint32) *mutex {
	root := semtable.rootFor(addr)
	return &root.lock
}

var Semacquire = semacquire
var Semrelease1 = semrelease1

func SemNwait(addr *uint32) uint32 {
	root := semtable.rootFor(addr)
	return root.nwait.Load()
}

const SemTableSize = semTabSize

// SemTable is a wrapper around semTable exported for testing.
type SemTable struct {
	semTable
}

// Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
func (t *SemTable) Enqueue(addr *uint32) {
	s := acquireSudog()
	s.releasetime = 0
	s.acquiretime = 0
	s.ticket = 0
	t.semTable.rootFor(addr).queue(addr, s, false)
}

// Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
//
// Returns true if there actually was a waiter to be dequeued.
func (t *SemTable) Dequeue(addr *uint32) bool {
	s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
	if s != nil {
		releaseSudog(s)
		return true
	}
	return false
}
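
// Editor's sketch (not in the original file): Enqueue and Dequeue pair up
// through the same semaphore address:
//
//	tab := runtime.Escape(new(runtime.SemTable))
//	var sema uint32
//	tab.Enqueue(&sema)
//	ok := tab.Dequeue(&sema) // true: exactly one waiter was queued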

// mspan wrapper for testing.
type MSpan mspan

// Allocate an mspan for testing.
func AllocMSpan() *MSpan {
	var s *mspan
	systemstack(func() {
		lock(&mheap_.lock)
		s = (*mspan)(mheap_.spanalloc.alloc())
		s.init(0, 0)
		unlock(&mheap_.lock)
	})
	return (*MSpan)(s)
}

// Free an allocated mspan.
func FreeMSpan(s *MSpan) {
	systemstack(func() {
		lock(&mheap_.lock)
		mheap_.spanalloc.free(unsafe.Pointer(s))
		unlock(&mheap_.lock)
	})
}

func MSpanCountAlloc(ms *MSpan, bits []byte) int {
	s := (*mspan)(ms)
	s.nelems = uint16(len(bits) * 8)
	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
	result := s.countAlloc()
	s.gcmarkBits = nil
	return result
}

type MSpanQueue mSpanQueue

func (q *MSpanQueue) Size() int {
	return (*mSpanQueue)(q).n
}

func (q *MSpanQueue) Push(s *MSpan) {
	(*mSpanQueue)(q).push((*mspan)(s))
}

func (q *MSpanQueue) Pop() *MSpan {
	s := (*mSpanQueue)(q).pop()
	return (*MSpan)(s)
}

func (q *MSpanQueue) TakeAll(p *MSpanQueue) {
	(*mSpanQueue)(q).takeAll((*mSpanQueue)(p))
}

func (q *MSpanQueue) PopN(n int) MSpanQueue {
	p := (*mSpanQueue)(q).popN(n)
	return (MSpanQueue)(p)
}

const (
	TimeHistSubBucketBits = timeHistSubBucketBits
	TimeHistNumSubBuckets = timeHistNumSubBuckets
	TimeHistNumBuckets    = timeHistNumBuckets
	TimeHistMinBucketBits = timeHistMinBucketBits
	TimeHistMaxBucketBits = timeHistMaxBucketBits
)

type TimeHistogram timeHistogram

// Count returns the count for the given (bucket, subBucket) pair. If the
// indices fall outside the histogram, it returns the underflow count (for
// a negative bucket) or the overflow count (for an index past the end),
// along with false.
func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
	t := (*timeHistogram)(th)
	if bucket < 0 {
		return t.underflow.Load(), false
	}
	i := bucket*TimeHistNumSubBuckets + subBucket
	if i >= len(t.counts) {
		return t.overflow.Load(), false
	}
	return t.counts[i].Load(), true
}
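
// Editor's sketch (not in the original file): record a duration, then read a
// bucket back; which (bucket, subBucket) a duration lands in depends on the
// TimeHist* constants above, so the indices here are placeholders:
//
//	var h runtime.TimeHistogram
//	h.Record(1e6) // 1ms in nanoseconds
//	c, ok := h.Count(someBucket, someSubBucket)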

func (th *TimeHistogram) Record(duration int64) {
	(*timeHistogram)(th).record(duration)
}

var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets

func SetIntArgRegs(a int) int {
	lock(&finlock)
	old := intArgRegs
	if a >= 0 {
		intArgRegs = a
	}
	unlock(&finlock)
	return old
}

func FinalizerGAsleep() bool {
	return fingStatus.Load()&fingWait != 0
}

// For GCTestMoveStackOnNextCall, it's important not to introduce an
// extra layer of call, since then there's a frame between the stack
// growth and the desired function call.
var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall

// For GCTestIsReachable, it's important that we do this as a call so
// escape analysis can see through it.
func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
	return gcTestIsReachable(ptrs...)
}

// For GCTestPointerClass, it's important that we do this as a call so
// escape analysis can see through it.
//
// This is nosplit because gcTestPointerClass is.
//
//go:nosplit
func GCTestPointerClass(p unsafe.Pointer) string {
	return gcTestPointerClass(p)
}

const Raceenabled = raceenabled

const (
	GCBackgroundUtilization            = gcBackgroundUtilization
	GCGoalUtilization                  = gcGoalUtilization
	DefaultHeapMinimum                 = defaultHeapMinimum
	MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
	MemoryLimitMinHeapGoalHeadroom     = memoryLimitMinHeapGoalHeadroom
)

type GCController struct {
	gcControllerState
}

func NewGCController(gcPercent int, memoryLimit int64) *GCController {
	// Force the controller to escape. We're going to
	// do 64-bit atomics on it, and if it gets stack-allocated
	// on a 32-bit architecture, it may get allocated unaligned
	// space.
	g := Escape(new(GCController))
	g.gcControllerState.test = true // Mark it as a test copy.
	g.init(int32(gcPercent), memoryLimit)
	return g
}

func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
	trigger, _ := c.trigger()
	if c.heapMarked > trigger {
		trigger = c.heapMarked
	}
	c.maxStackScan.Store(stackSize)
	c.globalsScan.Store(globalsSize)
	c.heapLive.Store(trigger)
	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
}

func (c *GCController) AssistWorkPerByte() float64 {
	return c.assistWorkPerByte.Load()
}

func (c *GCController) HeapGoal() uint64 {
	return c.heapGoal()
}

func (c *GCController) HeapLive() uint64 {
	return c.heapLive.Load()
}

func (c *GCController) HeapMarked() uint64 {
	return c.heapMarked
}

func (c *GCController) Triggered() uint64 {
	return c.triggered
}

type GCControllerReviseDelta struct {
	HeapLive        int64
	HeapScan        int64
	HeapScanWork    int64
	StackScanWork   int64
	GlobalsScanWork int64
}

func (c *GCController) Revise(d GCControllerReviseDelta) {
	c.heapLive.Add(d.HeapLive)
	c.heapScan.Add(d.HeapScan)
	c.heapScanWork.Add(d.HeapScanWork)
	c.stackScanWork.Add(d.StackScanWork)
	c.globalsScanWork.Add(d.GlobalsScanWork)
	c.revise()
}

func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
	c.assistTime.Store(assistTime)
	c.endCycle(elapsed, gomaxprocs, false)
	c.resetLive(bytesMarked)
	c.commit(false)
}

func (c *GCController) AddIdleMarkWorker() bool {
	return c.addIdleMarkWorker()
}

func (c *GCController) NeedIdleMarkWorker() bool {
	return c.needIdleMarkWorker()
}

func (c *GCController) RemoveIdleMarkWorker() {
	c.removeIdleMarkWorker()
}

func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
	c.setMaxIdleMarkWorkers(max)
}

var alwaysFalse bool
var escapeSink any

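// Escape forces x to escape to the heap (editor's note, added for clarity):
// alwaysFalse is never set, so the assignment to escapeSink never runs, but
// the compiler cannot prove that, so escape analysis must assume x leaks.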
func Escape[T any](x T) T {
	if alwaysFalse {
		escapeSink = x
	}
	return x
}

// Acquirem blocks preemption.
func Acquirem() {
	acquirem()
}

func Releasem() {
	releasem(getg().m)
}

var Timediv = timediv

type PIController struct {
	piController
}

func NewPIController(kp, ti, tt, min, max float64) *PIController {
	return &PIController{piController{
		kp:  kp,
		ti:  ti,
		tt:  tt,
		min: min,
		max: max,
	}}
}

func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
	return c.piController.next(input, setpoint, period)
}
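
// Editor's sketch (not in the original file): drive the PI controller toward
// a setpoint; the bool is false if the controller hit an overflow and reset:
//
//	c := runtime.NewPIController(0.9, 4, 1000, -1000, 1000) // kp, ti, tt, min, max
//	out, ok := c.Next(0.5, 1.0, 1.0) // input, setpoint, period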

const (
	CapacityPerProc          = capacityPerProc
	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
)

type GCCPULimiter struct {
	limiter gcCPULimiterState
}

func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
	// Force the limiter to escape. We're going to
	// do 64-bit atomics on it, and if it gets stack-allocated
	// on a 32-bit architecture, it may get allocated unaligned
	// space.
	l := Escape(new(GCCPULimiter))
	l.limiter.test = true
	l.limiter.resetCapacity(now, gomaxprocs)
	return l
}

func (l *GCCPULimiter) Fill() uint64 {
	return l.limiter.bucket.fill
}

func (l *GCCPULimiter) Capacity() uint64 {
	return l.limiter.bucket.capacity
}

func (l *GCCPULimiter) Overflow() uint64 {
	return l.limiter.overflow
}

func (l *GCCPULimiter) Limiting() bool {
	return l.limiter.limiting()
}

func (l *GCCPULimiter) NeedUpdate(now int64) bool {
	return l.limiter.needUpdate(now)
}

func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
	l.limiter.startGCTransition(enableGC, now)
}

func (l *GCCPULimiter) FinishGCTransition(now int64) {
	l.limiter.finishGCTransition(now)
}

func (l *GCCPULimiter) Update(now int64) {
	l.limiter.update(now)
}

func (l *GCCPULimiter) AddAssistTime(t int64) {
	l.limiter.addAssistTime(t)
}

func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
	l.limiter.resetCapacity(now, nprocs)
}

const ScavengePercent = scavengePercent

type Scavenger struct {
	Sleep      func(int64) int64
	Scavenge   func(uintptr) (uintptr, int64)
	ShouldStop func() bool
	GoMaxProcs func() int32

	released  atomic.Uintptr
	scavenger scavengerState
	stop      chan<- struct{}
	done      <-chan struct{}
}

func (s *Scavenger) Start() {
	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
		panic("must populate all stubs")
	}

	// Install hooks.
	s.scavenger.sleepStub = s.Sleep
	s.scavenger.scavenge = s.Scavenge
	s.scavenger.shouldStop = s.ShouldStop
	s.scavenger.gomaxprocs = s.GoMaxProcs

	// Start the scavenger.
	stop := make(chan struct{})
	s.stop = stop
	done := make(chan struct{})
	s.done = done
	go func() {
		// This should match bgscavenge, loosely.
		s.scavenger.init()
		s.scavenger.park()
		for {
			select {
			case <-stop:
				close(done)
				return
			default:
			}
			released, workTime := s.scavenger.run()
			if released == 0 {
				s.scavenger.park()
				continue
			}
			s.released.Add(released)
			s.scavenger.sleep(workTime)
		}
	}()
	if !s.BlockUntilParked(1e9 /* 1 second */) {
		panic("timed out waiting for scavenger to get ready")
	}
}

// BlockUntilParked blocks until the scavenger parks, or until
// timeout is exceeded. Returns true if the scavenger parked.
//
// Note that in testing, parked means something slightly different.
// In anger, the scavenger parks to sleep, too, but in testing,
// it only parks when it actually has no work to do.
func (s *Scavenger) BlockUntilParked(timeout int64) bool {
	// Just spin, waiting for it to park.
	//
	// The actual parking process is racy with respect to
	// wakeups, which is fine, but for testing we need something
	// a bit more robust.
	start := nanotime()
	for nanotime()-start < timeout {
		lock(&s.scavenger.lock)
		parked := s.scavenger.parked
		unlock(&s.scavenger.lock)
		if parked {
			return true
		}
		Gosched()
	}
	return false
}

// Released returns how many bytes the scavenger released.
func (s *Scavenger) Released() uintptr {
	return s.released.Load()
}

// Wake wakes up a parked scavenger to keep running.
func (s *Scavenger) Wake() {
	s.scavenger.wake()
}

// Stop cleans up the scavenger's resources. The scavenger
// must be parked for this to work.
func (s *Scavenger) Stop() {
	lock(&s.scavenger.lock)
	parked := s.scavenger.parked
	unlock(&s.scavenger.lock)
	if !parked {
		panic("tried to clean up scavenger that is not parked")
	}
	close(s.stop)
	s.Wake()
	<-s.done
}
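
// Editor's sketch (not in the original file): a test wires up the stubs,
// starts the scavenger, and runs its lifecycle once:
//
//	s := &runtime.Scavenger{
//		Sleep:      func(d int64) int64 { return d },
//		Scavenge:   func(n uintptr) (uintptr, int64) { return 0, 0 }, // nothing to scavenge
//		ShouldStop: func() bool { return true },
//		GoMaxProcs: func() int32 { return 1 },
//	}
//	s.Start()
//	s.Wake()                // wakes the parked scavenger; it finds no work and parks again
//	s.BlockUntilParked(1e9) // wait up to 1s
//	s.Stop()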

type ScavengeIndex struct {
	i scavengeIndex
}

func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
	s := new(ScavengeIndex)
	// Eagerly allocate backing store for every chunk up to max. (In the
	// real runtime the index's backing store is mapped sparsely as the
	// heap grows; for testing it's simpler to allocate it all up front.)
	s.i.chunks = make([]atomicScavChunkData, max)
	s.i.min.Store(uintptr(min))
	s.i.max.Store(uintptr(max))
	s.i.minHeapIdx.Store(uintptr(min))
	s.i.test = true
	return s
}

func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
	ci, off := s.i.find(force)
	return ChunkIdx(ci), off
}

func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
	sc, ec := chunkIndex(base), chunkIndex(limit-1)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)

	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		s.i.alloc(sc, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		s.i.alloc(sc, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			s.i.alloc(c, pallocChunkPages)
		}
		s.i.alloc(ec, ei+1)
	}
}

func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
	sc, ec := chunkIndex(base), chunkIndex(limit-1)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)

	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		s.i.free(sc, si, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		s.i.free(sc, si, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			s.i.free(c, 0, pallocChunkPages)
		}
		s.i.free(ec, 0, ei+1)
	}
}

func (s *ScavengeIndex) ResetSearchAddrs() {
	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
		addr, marked := a.Load()
		if marked {
			a.StoreUnmark(addr, addr)
		}
		a.Clear()
	}
	s.i.freeHWM = minOffAddr
}

func (s *ScavengeIndex) NextGen() {
	s.i.nextGen()
}

func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
	s.i.setEmpty(chunkIdx(ci))
}

func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
	sc0 := scavChunkData{
		gen:            gen,
		inUse:          inUse,
		lastInUse:      lastInUse,
		scavChunkFlags: scavChunkFlags(flags),
	}
	scp := sc0.pack()
	sc1 := unpackScavChunkData(scp)
	return sc0 == sc1
}

const GTrackingPeriod = gTrackingPeriod

var ZeroBase = unsafe.Pointer(&zerobase)

const UserArenaChunkBytes = userArenaChunkBytes

type UserArena struct {
	arena *userArena
}

func NewUserArena() *UserArena {
	return &UserArena{newUserArena()}
}

func (a *UserArena) New(out *any) {
	i := efaceOf(out)
	typ := i._type
	if typ.Kind_&abi.KindMask != abi.Pointer {
		panic("new result of non-ptr type")
	}
	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
	i.data = a.arena.new(typ)
}

func (a *UserArena) Slice(sl any, cap int) {
	a.arena.slice(sl, cap)
}

func (a *UserArena) Free() {
	a.arena.free()
}
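
// Editor's sketch (not in the original file): allocate into an arena, then
// free the whole arena in one shot. New requires the out parameter's dynamic
// type to be a pointer type; MyType is a placeholder:
//
//	a := runtime.NewUserArena()
//	var x any = (*MyType)(nil)
//	a.New(&x) // x now holds a *MyType allocated in the arena
//	p := x.(*MyType)
//	_ = p
//	a.Free() // release the arena's memory at once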

func GlobalWaitingArenaChunks() int {
	n := 0
	systemstack(func() {
		lock(&mheap_.lock)
		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
			n++
		}
		unlock(&mheap_.lock)
	})
	return n
}

func UserArenaClone[T any](s T) T {
	return arena_heapify(s).(T)
}

var AlignUp = alignUp

func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
	return blockUntilEmptyFinalizerQueue(timeout)
}

func BlockUntilEmptyCleanupQueue(timeout int64) bool {
	return gcCleanups.blockUntilEmpty(timeout)
}

func FrameStartLine(f *Frame) int {
	return f.startLine
}

// PersistentAlloc allocates some memory that lives outside the Go heap.
// This memory will never be freed; use it carefully.
func PersistentAlloc(n, align uintptr) unsafe.Pointer {
	return persistentalloc(n, align, &memstats.other_sys)
}

const TagAlign = tagAlign

// FPCallers works like Callers and uses frame pointer unwinding to populate
// pcBuf with the return addresses of the physical frames on the stack.
func FPCallers(pcBuf []uintptr) int {
	return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
}

const FramePointerEnabled = framepointer_enabled

var (
	IsPinned      = isPinned
	GetPinCounter = pinnerGetPinCounter
)

func SetPinnerLeakPanic(f func()) {
	pinnerLeakPanic = f
}
func GetPinnerLeakPanic() func() {
	return pinnerLeakPanic
}

var testUintptr uintptr

func MyGenericFunc[T any]() {
	systemstack(func() {
		testUintptr = 4
	})
}

func UnsafePoint(pc uintptr) bool {
	fi := findfunc(pc)
	v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
	switch v {
	case abi.UnsafePointUnsafe:
		return true
	case abi.UnsafePointSafe:
		return false
	case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
		// Restartable sequences are preemptible: the runtime restarts or
		// resumes them after preemption rather than treating the PC as unsafe.
		return false
	default:
		var buf [20]byte
		panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
	}
}

type TraceMap struct {
	traceMap
}

func (m *TraceMap) PutString(s string) (uint64, bool) {
	return m.traceMap.put(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
}

func (m *TraceMap) Reset() {
	m.traceMap.reset()
}

func SetSpinInGCMarkDone(spin bool) {
	gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin)
}

func GCMarkDoneRestarted() bool {
	// Only read this flag while the GC is off.
	mp := acquirem()
	if gcphase != _GCoff {
		releasem(mp)
		return false
	}
	restarted := gcDebugMarkDone.restartedDueTo27993
	releasem(mp)
	return restarted
}

func GCMarkDoneResetRestartFlag() {
	mp := acquirem()
	for gcphase != _GCoff {
		releasem(mp)
		Gosched()
		mp = acquirem()
	}
	gcDebugMarkDone.restartedDueTo27993 = false
	releasem(mp)
}

type BitCursor struct {
	b bitCursor
}

func NewBitCursor(buf *byte) BitCursor {
	return BitCursor{b: bitCursor{ptr: buf, n: 0}}
}

func (b BitCursor) Write(data *byte, cnt uintptr) {
	b.b.write(data, cnt)
}
func (b BitCursor) Offset(cnt uintptr) BitCursor {
	return BitCursor{b: b.b.offset(cnt)}
}

const (
	BubbleAssocUnbubbled     = bubbleAssocUnbubbled
	BubbleAssocCurrentBubble = bubbleAssocCurrentBubble
	BubbleAssocOtherBubble   = bubbleAssocOtherBubble
)

type TraceStackTable traceStackTable

func (t *TraceStackTable) Reset() {
	t.tab.reset()
}

func TraceStack(gp *G, tab *TraceStackTable) {
	traceStack(0, gp, (*traceStackTable)(tab))
}