Lines Matching full:gp

370 gp := getg()
371 // Call gosched if gp.preempt is set; we may be in a tight loop that
373 if !gp.preempt && sched.npidle.Load() > 0 {
412 gp := mp.curg
413 status := readgstatus(gp)
419 gp.waitreason = reason
428 // The goroutine can be made runnable again by calling goready(gp).
443 func goready(gp *g, traceskip int) {
445 ready(gp, traceskip, true)
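
The gopark/goready pair matched above is runtime-internal and not callable from user code. As a minimal runnable analogue (standard library only, nothing from the matches themselves): a channel receive parks the receiving goroutine, which the runtime implements via gopark, and the matching send makes it runnable again via goready.

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        parked := make(chan struct{})
        go func() {
            <-parked // receiver blocks here; the runtime parks it via gopark
            fmt.Println("readied and running again")
        }()
        time.Sleep(10 * time.Millisecond) // let the goroutine reach the receive
        parked <- struct{}{}              // the send readies the parked goroutine via goready
        time.Sleep(10 * time.Millisecond)
    }
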
507 gp := getg()
508 if gp.param != nil {
509 throw("runtime: releaseSudog with non-nil gp.param")
620 gp := getg()
621 return gp.lockedm != 0 && gp.m.lockedg != 0
650 func allgadd(gp *g) {
651 if readgstatus(gp) == _Gidle {
656 allgs = append(allgs, gp)
693 func forEachG(fn func(gp *g)) {
695 for _, gp := range allgs {
696 fn(gp)
705 func forEachGRace(fn func(gp *g)) {
708 gp := atomicAllGIndex(ptr, i)
709 fn(gp)
804 gp := getg()
806 gp.racectx, raceprocctx0 = raceinit()
823 mcommoninit(gp.m, -1)
829 sigsave(&gp.m.sigmask)
830 initSigmask = gp.m.sigmask
854 mProfStackInit(gp.m)
882 func dumpgstatus(gp *g) {
884 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
925 gp := getg()
928 if gp != gp.m.g0 {
1021 // Mark gp ready to run.
1022 func ready(gp *g, traceskip int, next bool) {
1023 status := readgstatus(gp)
1028 dumpgstatus(gp)
1034 casgstatus(gp, _Gwaiting, _Grunnable)
1036 trace.GoUnpark(gp, traceskip)
1039 runqput(mp.p.ptr(), gp, next)
1109 func readgstatus(gp *g) uint32 {
1110 return gp.atomicstatus.Load()
1117 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1123 …print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", he…
1124 dumpgstatus(gp)
1125 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1132 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1136 …print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(ne…
1137 dumpgstatus(gp)
1138 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1143 // This will return false if the gp is not in the expected status and the cas fails.
1145 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1152 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1175 func casgstatus(gp *g, oldval, newval uint32) {
1191 // loop if gp->atomicstatus is in a scan state giving
1193 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1194 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1205 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1216 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1217 gp.tracking = true
1219 gp.trackingSeq++
1221 if !gp.tracking {
1236 gp.runnableTime += now - gp.trackingStamp
1237 gp.trackingStamp = 0
1239 if !gp.waitreason.isMutexWait() {
1249 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1250 gp.trackingStamp = 0
1254 if !gp.waitreason.isMutexWait() {
1260 gp.trackingStamp = now
1265 gp.trackingStamp = now
1270 gp.tracking = false
1271 sched.timeToRun.record(gp.runnableTime)
1272 gp.runnableTime = 0
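
casgstatus (lines 1175-1272 above) is a compare-and-swap loop that spins while the goroutine's status carries the transient _Gscan bit. A minimal sketch of that loop using sync/atomic, with made-up status constants and without the runtime's backoff or the latency-tracking bookkeeping shown above:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    const (
        gRunnable uint32 = 1 << 0
        gRunning  uint32 = 1 << 1
        gScan     uint32 = 0x1000 // stand-in for runtime._Gscan
    )

    // casStatus retries while a (hypothetical) GC scan holds the scan bit.
    func casStatus(status *atomic.Uint32, oldval, newval uint32) {
        for !status.CompareAndSwap(oldval, newval) {
            if cur := status.Load(); cur != oldval|gScan {
                panic(fmt.Sprintf("bad status %#x", cur)) // the runtime throws here
            }
            // Spin: the scan bit clears when the scanner finishes.
        }
    }

    func main() {
        var st atomic.Uint32
        st.Store(gRunnable)
        casStatus(&st, gRunnable, gRunning)
        fmt.Printf("status=%#x\n", st.Load())
    }
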
1276 // casGToWaiting transitions gp from old to _Gwaiting, and sets the wait reason.
1279 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1281 gp.waitreason = reason
1282 casgstatus(gp, old, _Gwaiting)
1285 // casGToWaitingForGC transitions gp from old to _Gwaiting, and sets the wait reason.
1289 func casGToWaitingForGC(gp *g, old uint32, reason waitReason) {
1293 casGToWaiting(gp, old, reason)
1296 // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
1303 func casgcopystack(gp *g) uint32 {
1305 oldstatus := readgstatus(gp) &^ _Gscan
1309 if gp.atomicstatus.CompareAndSwap(oldstatus, _Gcopystack) {
1315 // casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted.
1319 func casGToPreemptScan(gp *g, old, new uint32) {
1324 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1328 // casGFromPreempted attempts to transition gp from _Gpreempted to
1330 // re-scheduling gp.
1331 func casGFromPreempted(gp *g, old, new uint32) bool {
1335 gp.waitreason = waitReasonPreempted
1336 return gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting)
1429 gp := getg()
1430 gp.m.preemptoff = reason.String()
1446 casGToWaitingForGC(gp, _Grunning, waitReasonStoppingTheWorld)
1448 casgstatus(gp, _Gwaiting, _Grunning)
1543 gp := getg()
1547 if gp.m.locks > 0 {
1557 gp.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
1558 gp.m.p.ptr().gcStopTime = start
1766 gp := getg()
1768 osStack := gp.stack.lo == 0
1778 size := gp.stack.hi
1782 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1783 gp.stack.lo = gp.stack.hi - size + 1024
1787 gp.stackguard0 = gp.stack.lo + stackGuard
1790 gp.stackguard1 = gp.stackguard0
1796 // the stack, but put it in gp.stack before mstart,
1808 gp := getg()
1810 if gp != gp.m.g0 {
1820 gp.sched.g = guintptr(unsafe.Pointer(gp))
1821 gp.sched.pc = getcallerpc()
1822 gp.sched.sp = getcallersp()
1829 if gp.m == &m0 {
1833 if fn := gp.m.mstartfn; fn != nil {
1837 if gp.m != &m0 {
1838 acquirep(gp.m.nextp.ptr())
1839 gp.m.nextp = 0
1865 gp := getg()
1866 notesleep(&gp.m.park)
1867 noteclear(&gp.m.park)
1873 // the top of the thread stack. Instead, use gogo(&gp.m.g0.sched) to
2001 gp := getg().m.curg
2010 casGToWaitingForGC(gp, _Grunning, reason)
2012 casgstatus(gp, _Gwaiting, _Grunning)
2169 gp := getg()
2170 if gp.m.p == 0 {
2225 if pp == gp.m.p.ptr() {
2229 releasem(gp.m)
2390 gp := malg(4096)
2391 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2392 gp.sched.sp = gp.stack.hi
2393 gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
2394 gp.sched.lr = 0
2395 gp.sched.g = guintptr(unsafe.Pointer(gp))
2396 gp.syscallpc = gp.sched.pc
2397 gp.syscallsp = gp.sched.sp
2398 gp.stktopsp = gp.sched.sp
2403 casgstatus(gp, _Gidle, _Gdead)
2404 gp.m = mp
2405 mp.curg = gp
2410 mp.lockedg.set(gp)
2411 gp.lockedm.set(mp)
2412 gp.goid = sched.goidgen.Add(1)
2414 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2417 allgadd(gp)
2419 // gp is now on the allg list, but we don't want it to be
2750 …if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9…
2870 gp := getg()
2872 if gp.m.locks != 0 {
2875 if gp.m.p != 0 {
2878 if gp.m.spinning {
2883 mput(gp.m)
2886 acquirep(gp.m.nextp.ptr())
2887 gp.m.nextp = 0
3128 gp := getg()
3130 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3133 if gp.m.p != 0 {
3141 status := readgstatus(gp.m.lockedg.ptr())
3144 dumpgstatus(gp.m.lockedg.ptr())
3147 acquirep(gp.m.nextp.ptr())
3148 gp.m.nextp = 0
3151 // Schedules the locked m to run the locked gp.
3155 func startlockedm(gp *g) {
3156 mp := gp.lockedm.ptr()
3174 gp := getg()
3179 if gp.m.spinning {
3180 gp.m.spinning = false
3199 // Schedules gp to run on the current M.
3200 // If inheritTime is true, gp inherits the remaining time in the
3208 func execute(gp *g, inheritTime bool) {
3212 // Make sure that gp has had its stack written out to the goroutine
3215 tryRecordGoroutineProfile(gp, nil, osyield)
3218 // Assign gp.m before entering _Grunning so running Gs have an
3220 mp.curg = gp
3221 gp.m = mp
3222 casgstatus(gp, _Grunnable, _Grunning)
3223 gp.waitsince = 0
3224 gp.preempt = false
3225 gp.stackguard0 = gp.stack.lo + stackGuard
3242 gogo(&gp.sched)
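
execute (lines 3208-3242) pins gp to the current M, transitions it _Grunnable -> _Grunning, resets the stack guard, and resumes it with gogo(&gp.sched), which reloads the saved register context and never returns. A toy model of resuming a saved context, assuming a simplified gobuf whose "program counter" is just a closure:

    package main

    import "fmt"

    type gobuf struct {
        pc func() // stand-in for the saved program counter
    }

    type G struct {
        id    int
        sched gobuf
    }

    func execute(gp *G) {
        fmt.Printf("G%d: _Grunnable -> _Grunning\n", gp.id)
        gp.sched.pc() // models gogo(&gp.sched); the real gogo is assembly
    }

    func main() {
        gp := &G{id: 7, sched: gobuf{pc: func() { fmt.Println("resumed at saved PC") }}}
        execute(gp)
    }
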
3249 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3274 gp := traceReader()
3275 if gp != nil {
3277 casgstatus(gp, _Gwaiting, _Grunnable)
3279 trace.GoUnpark(gp, 0)
3282 return gp, false, true
3288 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3289 if gp != nil {
3290 return gp, false, true
3300 gp := globrunqget(pp, 1)
3302 if gp != nil {
3303 return gp, false, false
3309 if gp := wakefing(); gp != nil {
3310 ready(gp, 0, true)
3318 if gp, inheritTime := runqget(pp); gp != nil {
3319 return gp, inheritTime, false
3325 gp := globrunqget(pp, 0)
3327 if gp != nil {
3328 return gp, false, false
3341 gp := list.pop()
3345 casgstatus(gp, _Gwaiting, _Grunnable)
3347 trace.GoUnpark(gp, 0)
3350 return gp, false, false
3364 gp, inheritTime, tnow, w, newWork := stealWork(now)
3365 if gp != nil {
3367 return gp, inheritTime, false
3390 gp := node.gp.ptr()
3393 casgstatus(gp, _Gwaiting, _Grunnable)
3395 trace.GoUnpark(gp, 0)
3398 return gp, false, false
3407 gp, otherReady := beforeIdle(now, pollUntil)
3408 if gp != nil {
3410 casgstatus(gp, _Gwaiting, _Grunnable)
3412 trace.GoUnpark(gp, 0)
3415 return gp, false, false
3438 gp := globrunqget(pp, 0)
3440 return gp, false, false
3512 gp := globrunqget(pp, 0)
3513 if gp == nil {
3519 return gp, false, false
3532 pp, gp := checkIdleGCNoP()
3540 casgstatus(gp, _Gwaiting, _Grunnable)
3542 trace.GoUnpark(gp, 0)
3545 return gp, false, false
3600 gp := list.pop()
3604 casgstatus(gp, _Gwaiting, _Grunnable)
3606 trace.GoUnpark(gp, 0)
3609 return gp, false, false
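
The findRunnable matches above reflect its priority order: trace reader, GC worker, an occasional global-queue check for fairness, the finalizer goroutine, the local run queue, the global queue, netpoll, then work stealing. A runnable toy of the core ordering, assuming simplified slice-backed queues (the runtime uses a lock-free ring plus atomics):

    package main

    import (
        "fmt"
        "sync"
    )

    type G struct{ id int }

    type P struct {
        runnext *G
        runq    []*G
    }

    var (
        globMu   sync.Mutex
        globRunq []*G
    )

    func runqget(pp *P) (*G, bool) {
        if gp := pp.runnext; gp != nil {
            pp.runnext = nil
            return gp, true // inheritTime: keep the current time slice
        }
        if len(pp.runq) > 0 {
            gp := pp.runq[0]
            pp.runq = pp.runq[1:]
            return gp, false
        }
        return nil, false
    }

    func globrunqget() *G {
        globMu.Lock()
        defer globMu.Unlock()
        if len(globRunq) == 0 {
            return nil
        }
        gp := globRunq[0]
        globRunq = globRunq[1:]
        return gp
    }

    func findRunnable(pp *P, schedTick int) (*G, bool) {
        // Check the global queue occasionally so a busy local queue
        // cannot starve it (the runtime uses every 61st tick).
        if schedTick%61 == 0 {
            if gp := globrunqget(); gp != nil {
                return gp, false
            }
        }
        if gp, inherit := runqget(pp); gp != nil {
            return gp, inherit
        }
        if gp := globrunqget(); gp != nil {
            return gp, false
        }
        // The real function then tries netpoll and steals from other Ps.
        return nil, false
    }

    func main() {
        pp := &P{runnext: &G{id: 1}, runq: []*G{{id: 2}}}
        globRunq = []*G{{id: 3}}
        for tick := 1; ; tick++ {
            gp, inherit := findRunnable(pp, tick)
            if gp == nil {
                break
            }
            fmt.Printf("run G%d inheritTime=%v\n", gp.id, inherit)
        }
    }
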
3654 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3701 if gp, inheritTime := runqget(pp); gp != nil {
3702 return gp, inheritTime, now, pollUntil, ranTimer
3710 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3711 return gp, false, now, pollUntil, ranTimer
3822 return pp, node.gp.ptr()
3848 gp := getg()
3849 if !gp.m.spinning {
3852 gp.m.spinning = false
3877 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
3878 trace.GoUnpark(gp, 0)
3888 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3889 tail = gp
3891 casgstatus(gp, _Gwaiting, _Grunnable)
3995 gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available
4016 if sched.disable.user && !schedEnabled(gp) {
4021 if schedEnabled(gp) {
4026 sched.disable.runnable.pushBack(gp)
4038 if gp.lockedm != 0 {
4041 startlockedm(gp)
4045 execute(gp, inheritTime)
4048 // dropg removes the association between m and the current goroutine m->curg (gp for short).
4049 // Typically a caller sets gp's status away from Grunning and then
4051 // for arranging that gp will be restarted using ready at an
4052 // appropriate time. After calling dropg and arranging for gp to be
4056 gp := getg()
4058 setMNoWB(&gp.m.curg.m, nil)
4059 setGNoWB(&gp.m.curg, nil)
4062 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4068 func park_m(gp *g) {
4081 casgstatus(gp, _Grunning, _Gwaiting)
4089 ok := fn(gp, mp.waitlock)
4094 casgstatus(gp, _Gwaiting, _Grunnable)
4096 trace.GoUnpark(gp, 2)
4099 execute(gp, true) // Schedule it back, never returns.
4105 func goschedImpl(gp *g, preempted bool) {
4107 status := readgstatus(gp)
4109 dumpgstatus(gp)
4122 casgstatus(gp, _Grunning, _Grunnable)
4129 globrunqput(gp)
4140 func gosched_m(gp *g) {
4141 goschedImpl(gp, false)
4145 func goschedguarded_m(gp *g) {
4146 if !canPreemptM(gp.m) {
4147 gogo(&gp.sched) // never return
4149 goschedImpl(gp, false)
4152 func gopreempt_m(gp *g) {
4153 goschedImpl(gp, true)
4156 // preemptPark parks gp and puts it in _Gpreempted.
4159 func preemptPark(gp *g) {
4160 status := readgstatus(gp)
4162 dumpgstatus(gp)
4166 if gp.asyncSafePoint {
4170 f := findfunc(gp.sched.pc)
4186 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4208 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4234 func goyield_m(gp *g) {
4236 pp := gp.m.p.ptr()
4243 casgstatus(gp, _Grunning, _Grunnable)
4248 runqput(pp, gp, false)
4266 func goexit0(gp *g) {
4267 gdestroy(gp)
4271 func gdestroy(gp *g) {
4275 casgstatus(gp, _Grunning, _Gdead)
4276 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4277 if isSystemGoroutine(gp, false) {
4280 gp.m = nil
4281 locked := gp.lockedm != 0
4282 gp.lockedm = 0
4284 gp.preemptStop = false
4285 gp.paniconfault = false
4286 gp._defer = nil // should be true already but just in case.
4287 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
4288 gp.writebuf = nil
4289 gp.waitreason = waitReasonZero
4290 gp.param = nil
4291 gp.labels = nil
4292 gp.timer = nil
4294 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4299 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4301 gp.gcAssistBytes = 0
4307 gfput(pp, gp)
4315 gfput(pp, gp)
4342 gp := getg()
4344 if gp == gp.m.g0 || gp == gp.m.gsignal {
4353 gp.sched.pc = pc
4354 gp.sched.sp = sp
4355 gp.sched.lr = 0
4356 gp.sched.ret = 0
4357 gp.sched.bp = bp
4361 if gp.sched.ctxt != nil {
4392 gp := getg()
4396 gp.m.locks++
4402 gp.stackguard0 = stackPreempt
4403 gp.throwsplit = true
4407 gp.syscallsp = sp
4408 gp.syscallpc = pc
4409 gp.syscallbp = bp
4410 casgstatus(gp, _Grunning, _Gsyscall)
4416 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4418 …print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stac…
4422 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4424 …print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stac…
4445 if gp.m.p.ptr().runSafePointFn != 0 {
4451 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4452 pp := gp.m.p.ptr()
4454 gp.m.oldp.set(pp)
4455 gp.m.p = 0
4462 gp.m.locks--
4498 gp := getg()
4499 pp := gp.m.oldp.ptr()
4512 // gp.m.syscalltick == pp.syscalltick, since then we know we never
4541 gp := getg()
4543 gp.m.locks++ // see comment in entersyscall
4544 gp.throwsplit = true
4545 gp.stackguard0 = stackPreempt // see comment in entersyscall
4546 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4547 gp.m.p.ptr().syscalltick++
4554 gp.syscallsp = gp.sched.sp
4555 gp.syscallpc = gp.sched.pc
4556 gp.syscallbp = gp.sched.bp
4557 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4559 sp2 := gp.sched.sp
4560 sp3 := gp.syscallsp
4562 …sistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi…
4566 casgstatus(gp, _Grunning, _Gsyscall)
4567 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4569 …ck inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.l…
4573 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4575 …ck inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.l…
4585 gp.m.locks--
4618 gp := getg()
4620 gp.m.locks++ // see comment in entersyscall
4621 if getcallersp() > gp.syscallsp {
4625 gp.waitsince = 0
4626 oldp := gp.m.oldp.ptr()
4627 gp.m.oldp = 0
4632 // Make sure that gp has had its stack written out to the goroutine
4636 tryRecordGoroutineProfileWB(gp)
4641 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4658 gp.m.p.ptr().syscalltick++
4660 casgstatus(gp, _Gsyscall, _Grunning)
4667 gp.syscallsp = 0
4668 gp.m.locks--
4669 if gp.preempt {
4671 gp.stackguard0 = stackPreempt
4674 gp.stackguard0 = gp.stack.lo + stackGuard
4676 gp.throwsplit = false
4678 if sched.disable.user && !schedEnabled(gp) {
4686 gp.m.locks--
4697 gp.syscallsp = 0
4698 gp.m.p.ptr().syscalltick++
4699 gp.throwsplit = false
4743 gp := getg()
4744 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4746 // The p was retaken and then entered a syscall again (since gp.m.syscalltick has changed).
4752 trace.ProcSteal(gp.m.p.ptr(), true)
4756 gp.m.p.ptr().syscalltick++
4776 // Failed to acquire P, enqueue gp as runnable.
4778 // Called via mcall, so gp is the calling g from this M.
4781 func exitsyscall0(gp *g) {
4785 casgstatus(gp, _Gsyscall, _Grunnable)
4798 if schedEnabled(gp) {
4803 globrunqput(gp)
4805 // Below, we stoplockedm if gp is locked. globrunqput releases
4806 // ownership of gp, so we must check if gp is locked prior to
4808 // could race with another M transitioning gp from unlocked to
4810 locked = gp.lockedm != 0
4818 execute(gp, false) // Never returns.
4821 // Wait until another thread schedules gp and so m again.
4826 execute(gp, false) // Never returns.
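
exitsyscall (lines 4618 onward) first tries a fast path: reacquire the P stashed in gp.m.oldp, or any idle P, without taking the scheduler lock; only when that fails does it mcall into exitsyscall0, which marks gp runnable on the global queue and parks the M. A small sketch of that decision, with an idle flag standing in for the runtime's atomic P-status transitions:

    package main

    import "fmt"

    type P struct {
        id   int
        idle bool
    }

    // tryReacquireP models exitsyscallfast: prefer the P recorded
    // before the syscall, then fall back to any idle P.
    func tryReacquireP(oldp *P, idlePs []*P) *P {
        if oldp != nil && oldp.idle {
            oldp.idle = false
            return oldp
        }
        for _, pp := range idlePs {
            if pp.idle {
                pp.idle = false
                return pp
            }
        }
        return nil // slow path: exitsyscall0 parks the M, gp -> global queue
    }

    func main() {
        oldp := &P{id: 0, idle: true}
        if pp := tryReacquireP(oldp, nil); pp != nil {
            fmt.Println("fast path: reacquired P", pp.id)
        }
    }
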
4846 gp := getg().m.curg
4851 gp.m.locks++
4852 sigsave(&gp.m.sigmask)
4859 gp.stackguard0 = stackFork
4876 gp := getg().m.curg
4879 gp.stackguard0 = gp.stack.lo + stackGuard
4881 msigrestore(gp.m.sigmask)
4883 gp.m.locks--
4920 // so we know that nothing else has changed gp.m.sigmask.
4975 gp := getg()
4978 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5134 func gfput(pp *p, gp *g) {
5135 if readgstatus(gp) != _Gdead {
5139 stksize := gp.stack.hi - gp.stack.lo
5143 stackfree(gp.stack)
5144 gp.stack.lo = 0
5145 gp.stack.hi = 0
5146 gp.stackguard0 = 0
5149 pp.gFree.push(gp)
5158 gp := pp.gFree.pop()
5160 if gp.stack.lo == 0 {
5161 noStackQ.push(gp)
5163 stackQ.push(gp)
5184 gp := sched.gFree.stack.pop()
5185 if gp == nil {
5186 gp = sched.gFree.noStack.pop()
5187 if gp == nil {
5192 pp.gFree.push(gp)
5198 gp := pp.gFree.pop()
5199 if gp == nil {
5203 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5208 stackfree(gp.stack)
5209 gp.stack.lo = 0
5210 gp.stack.hi = 0
5211 gp.stackguard0 = 0
5214 if gp.stack.lo == 0 {
5217 gp.stack = stackalloc(startingStackSize)
5219 gp.stackguard0 = gp.stack.lo + stackGuard
5222 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5225 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5228 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5231 return gp
5242 gp := pp.gFree.pop()
5244 if gp.stack.lo == 0 {
5245 noStackQ.push(gp)
5247 stackQ.push(gp)
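
gfput and gfget (lines 5134-5247) keep exited Gs on a per-P free list that spills to, and refills from, a global list under a scheduler lock. A self-contained sketch of the two-tier free list; the thresholds and the stack/no-stack split are simplified assumptions:

    package main

    import (
        "fmt"
        "sync"
    )

    type g struct{ schedlink *g }

    type p struct {
        gFree *g
        nfree int
    }

    var (
        gFreeMu   sync.Mutex
        gFreeGlob *g
    )

    func gfput(pp *p, gp *g) {
        gp.schedlink = pp.gFree
        pp.gFree = gp
        pp.nfree++
        if pp.nfree < 64 {
            return
        }
        // Local list is long: move half of it to the global list.
        gFreeMu.Lock()
        for pp.nfree >= 32 {
            spill := pp.gFree
            pp.gFree = spill.schedlink
            pp.nfree--
            spill.schedlink = gFreeGlob
            gFreeGlob = spill
        }
        gFreeMu.Unlock()
    }

    func gfget(pp *p) *g {
        if pp.gFree == nil && gFreeGlob != nil {
            // Refill a batch from the global list.
            gFreeMu.Lock()
            for pp.nfree < 32 && gFreeGlob != nil {
                gp := gFreeGlob
                gFreeGlob = gp.schedlink
                gp.schedlink = pp.gFree
                pp.gFree = gp
                pp.nfree++
            }
            gFreeMu.Unlock()
        }
        gp := pp.gFree
        if gp != nil {
            pp.gFree = gp.schedlink
            pp.nfree--
        }
        return gp
    }

    func main() {
        pp := &p{}
        gfput(pp, &g{})
        fmt.Println(gfget(pp) != nil, gfget(pp) == nil) // true true
    }
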
5272 gp := getg()
5273 gp.m.lockedg.set(gp)
5274 gp.lockedm.set(gp.m)
5300 gp := getg()
5301 gp.m.lockedExt++
5302 if gp.m.lockedExt == 0 {
5303 gp.m.lockedExt--
5324 gp := getg()
5325 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5328 gp.m.lockedg = 0
5329 gp.lockedm = 0
5347 gp := getg()
5348 if gp.m.lockedExt == 0 {
5351 gp.m.lockedExt--
5357 gp := getg()
5358 if gp.m.lockedInt == 0 {
5361 gp.m.lockedInt--
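
dolockOSThread and the lockedInt/lockedExt counters above back the public runtime.LockOSThread API, which is callable from user code:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        runtime.LockOSThread() // wire this goroutine to its current OS thread
        defer runtime.UnlockOSThread()
        // While locked, no other goroutine runs on this thread; useful for
        // C libraries or APIs that keep per-thread state.
        fmt.Println("goroutine locked to its OS thread")
    }
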
5407 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5476 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5478 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5502 // with no g, so gp could be nil. The other nil checks are done out of
5505 if gp != nil && gp.m != nil && gp.m.curg != nil {
5506 tagPtr = &gp.m.curg.labels
5510 gprof := gp
5513 if gp != nil && gp.m != nil {
5514 if gp.m.curg != nil {
5515 gprof = gp.m.curg
5517 mp = gp.m
5518 pp = gp.m.p.ptr()
5535 gp := getg()
5536 gp.m.locks++
5560 gp.m.locks--
5613 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5615 globrunqputhead(gp)
5747 gp := getg()
5748 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5750 gp.m.p.ptr().status = _Prunning
5751 gp.m.p.ptr().mcache.prepareForSweep()
5758 if gp.m.p != 0 {
5765 trace.ProcStop(gp.m.p.ptr())
5768 gp.m.p.ptr().m = 0
5770 gp.m.p = 0
5804 if gp.m.p.ptr() == pp {
5856 gp := getg()
5858 if gp.m.p != 0 {
5877 gp.m.p.set(pp)
5878 pp.m.set(gp.m)
5894 gp := getg()
5896 if gp.m.p == 0 {
5899 pp := gp.m.p.ptr()
5900 if pp.m.ptr() != gp.m || pp.status != _Prunning {
5901 …print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status,…
5904 gp.m.p = 0
5960 forEachG(func(gp *g) {
5961 if isSystemGoroutine(gp, false) {
5964 s := readgstatus(gp)
5972 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6302 // and will be indicated by the gp->status no longer being
6309 gp := mp.curg
6310 if gp == nil || gp == mp.g0 {
6314 gp.preempt = true
6317 // comparing the current stack pointer to gp->stackguard0.
6318 // Setting gp->stackguard0 to StackPreempt folds
6320 gp.stackguard0 = stackPreempt
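
Setting gp.stackguard0 = stackPreempt (line 6320) poisons the stack guard so the next function call's prologue check fails and control enters morestack, which notices gp.preempt and yields instead of growing the stack. A sketch of that comparison; the sentinel value here is illustrative, not the runtime's actual constant:

    package main

    import "fmt"

    // Anything larger than every valid stack address works as a poison
    // value for this comparison.
    const stackPreempt = ^uintptr(0) // illustrative sentinel

    // needsMorestack is the check each non-nosplit function prologue does.
    func needsMorestack(sp, stackguard0 uintptr) bool {
        return sp < stackguard0
    }

    func main() {
        sp := uintptr(0xc000123456)                   // a plausible goroutine SP
        fmt.Println(needsMorestack(sp, sp-1024))      // normal guard: false
        fmt.Println(needsMorestack(sp, stackPreempt)) // poisoned guard: true
    }
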
6401 forEachG(func(gp *g) {
6402 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6403 if gp.m != nil {
6404 print(gp.m.id)
6409 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6444 // schedEnabled reports whether gp should be scheduled. It returns
6445 // false if scheduling of gp is disabled.
6448 func schedEnabled(gp *g) bool {
6452 return isSystemGoroutine(gp, true)
6487 // Put gp on the global runnable queue.
6492 func globrunqput(gp *g) {
6495 sched.runq.pushBack(gp)
6499 // Put gp at the head of the global runnable queue.
6504 func globrunqputhead(gp *g) {
6507 sched.runq.push(gp)
6547 gp := sched.runq.pop()
6553 return gp
6696 func runqput(pp *p, gp *g, next bool) {
6715 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
6722 gp = oldnext.ptr()
6729 pp.runq[t%uint32(len(pp.runq))].set(gp)
6733 if runqputslow(pp, gp, h, t) {
6742 func runqputslow(pp *p, gp *g, h, t uint32) bool {
6757 batch[n] = gp
6790 gp := q.pop()
6791 pp.runq[t%uint32(len(pp.runq))].set(gp)
6816 // If inheritTime is true, gp should inherit the remaining time in the
6819 func runqget(pp *p) (gp *g, inheritTime bool) {
6835 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
6837 return gp, false
6870 // so that we can update any gp.schedlink only after we take the full ownership of G,
6874 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6875 drainQ.pushBack(gp)
6947 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
6949 return gp
6956 return gp
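
runqput (lines 6696-6733) prefers the single runnext slot when next is true, kicking any previous occupant into the ring, and falls back to runqputslow (move half the ring to the global queue) when the ring is full. A single-threaded sketch of the ring-plus-runnext discipline; the runtime version uses atomic head/tail so other Ps can steal concurrently:

    package main

    import "fmt"

    type G struct{ id int }

    type P struct {
        runnext    *G
        runq       [8]*G // the runtime's ring holds 256 entries
        head, tail uint32
    }

    func runqput(pp *P, gp *G, next bool) {
        if next {
            gp, pp.runnext = pp.runnext, gp // displace the old runnext
            if gp == nil {
                return
            }
        }
        if pp.tail-pp.head < uint32(len(pp.runq)) {
            pp.runq[pp.tail%uint32(len(pp.runq))] = gp
            pp.tail++
            return
        }
        // Ring full: the real runqputslow moves half of it, plus gp,
        // to the global run queue here.
    }

    func runqget(pp *P) (*G, bool) {
        if gp := pp.runnext; gp != nil {
            pp.runnext = nil
            return gp, true // inheritTime: finish the current time slice
        }
        if pp.head == pp.tail {
            return nil, false
        }
        gp := pp.runq[pp.head%uint32(len(pp.runq))]
        pp.head++
        return gp, false
    }

    func main() {
        pp := &P{}
        runqput(pp, &G{id: 1}, false)
        runqput(pp, &G{id: 2}, true) // becomes runnext
        for gp, inherit := runqget(pp); gp != nil; gp, inherit = runqget(pp) {
            fmt.Printf("G%d inheritTime=%v\n", gp.id, inherit)
        }
    }
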
6971 // push adds gp to the head of q.
6972 func (q *gQueue) push(gp *g) {
6973 gp.schedlink = q.head
6974 q.head.set(gp)
6976 q.tail.set(gp)
6980 // pushBack adds gp to the tail of q.
6981 func (q *gQueue) pushBack(gp *g) {
6982 gp.schedlink = 0
6984 q.tail.ptr().schedlink.set(gp)
6986 q.head.set(gp)
6988 q.tail.set(gp)
7009 gp := q.head.ptr()
7010 if gp != nil {
7011 q.head = gp.schedlink
7016 return gp
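
gQueue (lines 6971-7016) is an intrusive FIFO: each g carries its own schedlink pointer, so pushing and popping never allocate. A minimal standalone version of the same structure:

    package main

    import "fmt"

    type node struct {
        val       int
        schedlink *node // intrusive link, like g.schedlink
    }

    type gQueue struct{ head, tail *node }

    // push adds n to the head of q.
    func (q *gQueue) push(n *node) {
        n.schedlink = q.head
        q.head = n
        if q.tail == nil {
            q.tail = n
        }
    }

    // pushBack adds n to the tail of q.
    func (q *gQueue) pushBack(n *node) {
        n.schedlink = nil
        if q.tail != nil {
            q.tail.schedlink = n
        } else {
            q.head = n
        }
        q.tail = n
    }

    // pop removes and returns the head of q, or nil if q is empty.
    func (q *gQueue) pop() *node {
        n := q.head
        if n != nil {
            q.head = n.schedlink
            if q.head == nil {
                q.tail = nil
            }
        }
        return n
    }

    func main() {
        var q gQueue
        q.pushBack(&node{val: 1})
        q.pushBack(&node{val: 2})
        q.push(&node{val: 0})
        for n := q.pop(); n != nil; n = q.pop() {
            fmt.Println(n.val) // 0, 1, 2
        }
    }
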
7037 // push adds gp to the head of l.
7038 func (l *gList) push(gp *g) {
7039 gp.schedlink = l.head
7040 l.head.set(gp)
7053 gp := l.head.ptr()
7054 if gp != nil {
7055 l.head = gp.schedlink
7057 return gp
7087 gp := getg()
7088 mp := gp.m
7107 gp := getg()
7108 gp.m.locks--