Source file src/runtime/cgocall.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Cgo call and callback support.
//
// To call into the C function f from Go, the cgo-generated code calls
// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
// gcc-compiled function written by cgo.
//
// runtime.cgocall (below) calls entersyscall so as not to block
// other goroutines or the garbage collector, and then calls
// runtime.asmcgocall(_cgo_Cfunc_f, frame).
//
// runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack
// (assumed to be an operating system-allocated stack, so safe to run
// gcc-compiled code on) and calls _cgo_Cfunc_f(frame).
//
// _cgo_Cfunc_f invokes the actual C function f with arguments
// taken from the frame structure, records the results in the frame,
// and returns to runtime.asmcgocall.
//
// After it regains control, runtime.asmcgocall switches back to the
// original g (m->curg)'s stack and returns to runtime.cgocall.
//
// After it regains control, runtime.cgocall calls exitsyscall, which blocks
// until this m can run Go code without violating the $GOMAXPROCS limit,
// and then unlocks g from m.
//
// The above description skipped over the possibility of the gcc-compiled
// function f calling back into Go. If that happens, we continue down
// the rabbit hole during the execution of f.
//
// To make it possible for gcc-compiled C code to call a Go function p.GoF,
// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't
// know about packages). The gcc-compiled C function f calls GoF.
//
// GoF initializes "frame", a structure containing all of its
// arguments and slots for p.GoF's results. It calls
// crosscall2(_cgoexp_GoF, frame, framesize, ctxt) using the gcc ABI.
//
// crosscall2 (in cgo/asm_$GOARCH.s) is a four-argument adapter from
// the gcc function call ABI to the gc function call ABI. At this
// point we're in the Go runtime, but we're still running on m.g0's
// stack and outside the $GOMAXPROCS limit. crosscall2 calls
// runtime.cgocallback(_cgoexp_GoF, frame, ctxt) using the gc ABI.
// (crosscall2's framesize argument is no longer used, but there's one
// case where SWIG calls crosscall2 directly and expects to pass this
// argument. See _cgo_panic.)
//
// runtime.cgocallback (in asm_$GOARCH.s) switches from m.g0's stack
// to the original g (m.curg)'s stack, on which it calls
// runtime.cgocallbackg(_cgoexp_GoF, frame, ctxt). As part of the
// stack switch, runtime.cgocallback saves the current SP as
// m.g0.sched.sp, so that any use of m.g0's stack during the execution
// of the callback will be done below the existing stack frames.
// Before overwriting m.g0.sched.sp, it pushes the old value on the
// m.g0 stack, so that it can be restored later.
//
// runtime.cgocallbackg (below) is now running on a real goroutine
// stack (not an m.g0 stack). First it calls runtime.exitsyscall, which will
// block until the $GOMAXPROCS limit allows running this goroutine.
// Once exitsyscall has returned, it is safe to do things like call the memory
// allocator or invoke the Go callback function. runtime.cgocallbackg
// first defers a function to unwind m.g0.sched.sp, so that if p.GoF
// panics, m.g0.sched.sp will be restored to its old value: the m.g0 stack
// and the m.curg stack will be unwound in lock step.
// Then it calls _cgoexp_GoF(frame).
//
// _cgoexp_GoF, which was generated by cmd/cgo, unpacks the arguments
// from frame, calls p.GoF, writes the results back to frame, and
// returns. Now we start unwinding this whole process.
//
// runtime.cgocallbackg pops but does not execute the deferred
// function to unwind m.g0.sched.sp, calls runtime.entersyscall, and
// returns to runtime.cgocallback.
//
// After it regains control, runtime.cgocallback switches back to
// m.g0's stack (the pointer is still in m.g0.sched.sp), restores the old
// m.g0.sched.sp value from the stack, and returns to crosscall2.
//
// crosscall2 restores the callee-save registers for gcc and returns
// to GoF, which unpacks any result values and returns to f.
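//
// As a concrete illustration of f and p.GoF above (purely a sketch with
// hypothetical names, not the exact code cmd/cgo generates), a package using
// cgo might look like:
//
//	package p
//
//	/*
//	void f(void); // defined in an accompanying .c file; may call GoF
//	*/
//	import "C"
//
//	//export GoF
//	func GoF() { println("callback from C") }
//
//	// CallF ends up in runtime.cgocall; if f calls GoF, that call comes
//	// back in through crosscall2 and runtime.cgocallback as described above.
//	func CallF() { C.f() }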

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"internal/runtime/sys"
	"unsafe"
)

// Addresses collected in a cgo backtrace when crashing.
// Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c.
type cgoCallers [32]uintptr

// argset matches runtime/cgo/linux_syscall.c:argset_t
type argset struct {
	args   unsafe.Pointer
	retval uintptr
}

// wrapper for syscall package to call cgocall for libc (cgo) calls.
//
//go:linkname syscall_cgocaller syscall.cgocaller
//go:nosplit
//go:uintptrescapes
func syscall_cgocaller(fn unsafe.Pointer, args ...uintptr) uintptr {
	as := argset{args: unsafe.Pointer(&args[0])}
	cgocall(fn, unsafe.Pointer(&as))
	return as.retval
}

var ncgocall uint64 // number of cgo calls in total for dead m

// Call from Go to C.
//
// This must be nosplit because it's used for syscalls on some
// platforms. Syscalls may have untyped arguments on the stack, so
// it's not safe to grow or scan the stack.
//
// cgocall should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ebitengine/purego
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname cgocall
//go:nosplit
func cgocall(fn, arg unsafe.Pointer) int32 {
	if !iscgo && GOOS != "solaris" && GOOS != "illumos" && GOOS != "windows" {
		throw("cgocall unavailable")
	}

	if fn == nil {
		throw("cgocall nil")
	}

	if raceenabled {
		racereleasemerge(unsafe.Pointer(&racecgosync))
	}

	mp := getg().m
	mp.ncgocall++

	// Reset traceback.
	mp.cgoCallers[0] = 0

	// Announce we are entering a system call
	// so that the scheduler knows to create another
	// M to run goroutines while we are in the
	// foreign code.
	//
	// The call to asmcgocall is guaranteed not to
	// grow the stack and does not allocate memory,
	// so it is safe to call while "in a system call", outside
	// the $GOMAXPROCS accounting.
	//
	// fn may call back into Go code, in which case we'll exit the
	// "system call", run the Go code (which may grow the stack),
	// and then re-enter the "system call" reusing the PC and SP
	// saved by entersyscall here.
	entersyscall()

	// Tell asynchronous preemption that we're entering external
	// code. We do this after entersyscall because this may block
	// and cause an async preemption to fail, but at this point a
	// sync preemption will succeed (though this is not a matter
	// of correctness).
	osPreemptExtEnter(mp)

	mp.incgo = true
	// We use ncgo as a check during execution tracing for whether there is
	// any C on the call stack, which there will be after this point. If
	// there isn't, we can use frame pointer unwinding to collect call
	// stacks efficiently. This will be the case for the first Go-to-C call
	// on a stack, so it's preferable to update it here, after we emit a
	// trace event in entersyscall above.
	mp.ncgo++

	errno := asmcgocall(fn, arg)

	// Update accounting before exitsyscall because exitsyscall may
	// reschedule us onto a different M.
	mp.incgo = false
	mp.ncgo--

	osPreemptExtExit(mp)

	// After exitsyscall we can be rescheduled on a different M,
	// so we need to restore the original M's winsyscall.
	winsyscall := mp.winsyscall

	exitsyscall()

	getg().m.winsyscall = winsyscall

	// Note that raceacquire must be called only after exitsyscall has
	// wired this M to a P.
	if raceenabled {
		raceacquire(unsafe.Pointer(&racecgosync))
	}

	if sys.DITSupported {
		// C code may have enabled or disabled DIT on this thread;
		// restore our state to the expected one.
		ditEnabled := sys.DITEnabled()
		gp := getg()
		if !gp.ditWanted && ditEnabled {
			sys.DisableDIT()
		} else if gp.ditWanted && !ditEnabled {
			sys.EnableDIT()
		}
	}

	// From the garbage collector's perspective, time can move
	// backwards in the sequence above. If there's a callback into
	// Go code, GC will see this function at the call to
	// asmcgocall. When the Go call later returns to C, the
	// syscall PC/SP is rolled back and the GC sees this function
	// back at the call to entersyscall. Normally, fn and arg
	// would be live at entersyscall and dead at asmcgocall, so if
	// time moved backwards, GC would see these arguments as dead
	// and then live. Prevent these undead arguments from crashing
	// GC by forcing them to stay live across this time warp.
	KeepAlive(fn)
	KeepAlive(arg)
	KeepAlive(mp)

	return errno
}
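
// To make the argument frame convention above concrete, here is a rough,
// morally-equivalent sketch (not the literal cmd/cgo output, whose helper
// names embed a package-specific hash) of the Go stub generated for a C
// function "int add(int a, int b)". The arguments and the result slot share
// one contiguous frame, and the address of that frame is what gets passed
// down to cgocall above:
//
//	func _Cfunc_add(p0, p1 _Ctype_int) (r1 _Ctype_int) {
//		// &p0 addresses the whole frame: p0, p1, then the slot for r1.
//		_cgo_runtime_cgocall(_cgo_Cfunc_add, uintptr(unsafe.Pointer(&p0)))
//		return
//	}
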
// Set or reset the system stack bounds for a callback on sp.
//
// Must be nosplit because it is called by needm prior to fully initializing
// the M.
//
//go:nosplit
func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
	g0 := mp.g0

	if !mp.isextra {
		// We allocated the stack for standard Ms. Don't replace the
		// stack bounds with estimated ones when we already initialized
		// with the exact ones.
		return
	}

	inBound := sp > g0.stack.lo && sp <= g0.stack.hi
	if inBound && mp.g0StackAccurate {
		// This M has called into Go before and has the stack bounds
		// initialized. We have the accurate stack bounds, and the SP
		// is in bounds. We expect it to continue running within the
		// same bounds.
		return
	}

	// We don't have accurate stack bounds (either this M has never
	// called into Go before, or we couldn't get the accurate bounds),
	// or the current SP is not within the previous bounds (the stack
	// may have changed between calls). We need to update the stack
	// bounds.
	//
	// N.B. we need to update the stack bounds even if SP appears to
	// already be in bounds, if our bounds are estimated dummy bounds
	// (below). We may be in a different region within the same actual
	// stack bounds, but our estimates were not accurate. Or the actual
	// stack bounds could have shifted but still have partial overlap with
	// our dummy bounds. If we failed to update in that case, we could find
	// ourselves seemingly called near the bottom of the stack bounds, where
	// we quickly run out of space.

	// Set the stack bounds to match the current stack. We don't actually
	// know how big the stack is (just as we don't know how big any
	// scheduling stack is), but we assume there's at least 32 kB. If we
	// can get a more accurate stack bound from pthread, use that, provided
	// it actually contains SP.
	g0.stack.hi = sp + 1024
	g0.stack.lo = sp - 32*1024
	mp.g0StackAccurate = false
	if !signal && _cgo_getstackbound != nil {
		// Don't adjust if called from the signal handler.
		// We are on the signal stack, not the pthread stack.
		// (We could get the stack bounds from sigaltstack, but
		// we're getting out of the signal handler very soon
		// anyway. Not worth it.)
		var bounds [2]uintptr
		asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds))
		// getstackbound is an unsupported no-op on Windows.
		//
		// On Unix systems, if the API to get accurate stack bounds is
		// not available, it returns zeros.
		//
		// Don't use these bounds if they don't contain SP. Perhaps we
		// were called by something not using the standard thread
		// stack.
		if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] {
			g0.stack.lo = bounds[0]
			g0.stack.hi = bounds[1]
			mp.g0StackAccurate = true
		}
	}
	g0.stackguard0 = g0.stack.lo + stackGuard
	g0.stackguard1 = g0.stackguard0
}

// Call from C back to Go. fn must point to an ABIInternal Go entry-point.
//
//go:nosplit
func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		println("runtime: bad g in cgocallback")
		exit(2)
	}

	sp := gp.m.g0.sched.sp // system sp saved by cgocallback.
	oldStack := gp.m.g0.stack
	oldAccurate := gp.m.g0StackAccurate
	callbackUpdateSystemStack(gp.m, sp, false)

	// The call from C is on gp.m's g0 stack, so we must ensure
	// that we stay on that M. We have to do this before calling
	// exitsyscall, since it would otherwise be free to move us to
	// a different M. The call to unlockOSThread is in this function
	// after cgocallbackg1, or in the case of panicking, in unwindm.
	lockOSThread()

	checkm := gp.m

	// Save the current syscall parameters, so m.winsyscall can be
	// used again if the callback decides to make a syscall.
	winsyscall := gp.m.winsyscall

	// entersyscall saves the caller's SP to allow the GC to trace the Go
	// stack. However, since we're returning to an earlier stack frame and
	// need to pair with the entersyscall() call made by cgocall, we must
	// save syscall* and let reentersyscall restore them.
	//
	// Note: savedsp and savedbp MUST be held in locals as unsafe.Pointers.
	// When we call into Go, the stack is free to be moved. If these locals
	// aren't visible in the stack maps, they won't get updated properly,
	// and will end up being stale when restored by reentersyscall.
	savedsp := unsafe.Pointer(gp.syscallsp)
	savedpc := gp.syscallpc
	savedbp := unsafe.Pointer(gp.syscallbp)
	exitsyscall() // coming out of cgo call
	gp.m.incgo = false
	if gp.m.isextra {
		gp.m.isExtraInC = false
	}

	osPreemptExtExit(gp.m)
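
	// A C function may be annotated in the cgo preamble with
	//
	//	#cgo nocallback functionname
	//
	// promising that it never calls back into Go. Reaching this point with
	// nocgocallback set on the goroutine means that promise was broken, so
	// fail loudly rather than continue.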
	if gp.nocgocallback {
		panic("runtime: function marked with #cgo nocallback called back into Go")
	}

	cgocallbackg1(fn, frame, ctxt)

	// At this point we're about to call unlockOSThread.
	// The following code must not change to a different m.
	// This is enforced by checking incgo in the schedule function.
	gp.m.incgo = true
	unlockOSThread()

	if gp.m.isextra && gp.m.ncgo == 0 {
		// There are no active cgocalls above this frame (ncgo == 0),
		// thus there can't be more Go frames above this frame.
		gp.m.isExtraInC = true
	}

	if gp.m != checkm {
		throw("m changed unexpectedly in cgocallbackg")
	}

	osPreemptExtEnter(gp.m)

	// going back to cgo call
	reentersyscall(savedpc, uintptr(savedsp), uintptr(savedbp))

	gp.m.winsyscall = winsyscall

	// Restore the old g0 stack bounds
	gp.m.g0.stack = oldStack
	gp.m.g0.stackguard0 = oldStack.lo + stackGuard
	gp.m.g0.stackguard1 = gp.m.g0.stackguard0
	gp.m.g0StackAccurate = oldAccurate
}

func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) {
	gp := getg()

	if gp.m.needextram || extraMWaiters.Load() > 0 {
		gp.m.needextram = false
		systemstack(newextram)
	}

	if ctxt != 0 {
		s := append(gp.cgoCtxt, ctxt)

		// Now we need to set gp.cgoCtxt = s, but we could get
		// a SIGPROF signal while manipulating the slice, and
		// the SIGPROF handler could pick up gp.cgoCtxt while
		// tracing up the stack. We need to ensure that the
		// handler always sees a valid slice, so set the
		// values in an order such that it always does.
		p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
		atomicstorep(unsafe.Pointer(&p.array), unsafe.Pointer(&s[0]))
		p.cap = cap(s)
		p.len = len(s)

		defer func(gp *g) {
			// Decrease the length of the slice by one, safely.
			p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
			p.len--
		}(gp)
	}

	if gp.m.ncgo == 0 {
		// The C call to Go came from a thread not currently running
		// any Go. In the case of -buildmode=c-archive or c-shared,
		// this call may be coming in before package initialization
		// is complete. Wait until it is.
		<-main_init_done
	}

	// Check whether the profiler needs to be turned on or off; this route to
	// run Go code does not use runtime.execute, so bypasses the check there.
	hz := sched.profilehz
	if gp.m.profilehz != hz {
		setThreadCPUProfiler(hz)
	}

	// Add entry to defer stack in case of panic.
	restore := true
	defer unwindm(&restore)

	var ditStateM, ditStateG bool
	if debug.dataindependenttiming == 1 && gp.m.isextra {
		// We only need to enable DIT for threads that were created by C, as it
		// should already be enabled on threads that were created by Go.
		ditStateM = sys.EnableDIT()
	} else if sys.DITSupported && debug.dataindependenttiming != 1 {
		// C code may have enabled or disabled DIT on this thread. Set the flag
		// on the M and G accordingly, saving their previous state to restore
		// on return from the callback.
		ditStateM, ditStateG = gp.m.ditEnabled, gp.ditWanted
		ditEnabled := sys.DITEnabled()
		gp.ditWanted = ditEnabled
		gp.m.ditEnabled = ditEnabled
	}

	if raceenabled {
		raceacquire(unsafe.Pointer(&racecgosync))
	}
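
	// fn is a raw ABIInternal entry PC (see the cgocallbackg doc comment),
	// not a Go func value, so wrap it in a funcval and store that funcval's
	// address into a func variable to make it callable; noescape keeps the
	// short-lived funcval from being treated as escaping.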
	// Invoke callback. This function is generated by cmd/cgo and
	// will unpack the argument frame and call the Go function.
	var cb func(frame unsafe.Pointer)
	cbFV := funcval{uintptr(fn)}
	*(*unsafe.Pointer)(unsafe.Pointer(&cb)) = noescape(unsafe.Pointer(&cbFV))
	cb(frame)

	if raceenabled {
		racereleasemerge(unsafe.Pointer(&racecgosync))
	}

	if debug.dataindependenttiming == 1 && !ditStateM {
		// Only unset DIT if it wasn't already enabled when cgocallback was called.
		sys.DisableDIT()
	} else if sys.DITSupported && debug.dataindependenttiming != 1 {
		// Restore DIT state on M and G.
		gp.ditWanted = ditStateG
		gp.m.ditEnabled = ditStateM
		if !ditStateM {
			sys.DisableDIT()
		}
	}

	// Do not unwind m->g0->sched.sp.
	// Our caller, cgocallback, will do that.
	restore = false
}

func unwindm(restore *bool) {
	if *restore {
		// Restore sp saved by cgocallback during
		// unwind of g's stack (see comment at top of file).
		mp := acquirem()
		sched := &mp.g0.sched
		sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + alignUp(sys.MinFrameSize, sys.StackAlign)))

		// Do the accounting that cgocall will not have a chance to do
		// during an unwind.
		//
		// In the case where a Go call originates from C, ncgo is 0
		// and there is no matching cgocall to end.
		if mp.ncgo > 0 {
			mp.incgo = false
			mp.ncgo--
			osPreemptExtExit(mp)
		}

		// Undo the call to lockOSThread in cgocallbackg, only on the
		// panicking path. In the normal return case cgocallbackg will
		// call unlockOSThread, ensuring no preemption point after the
		// unlock. Here we don't need to worry about preemption, because
		// we're panicking out of the callback and unwinding the g0
		// stack, instead of reentering cgo (which requires the same
		// thread).
		unlockOSThread()

		releasem(mp)
	}
}

// called from assembly.
func badcgocallback() {
	throw("misaligned stack in cgocallback")
}

// called from (incomplete) assembly.
func cgounimpl() {
	throw("cgo not implemented")
}

var racecgosync uint64 // represents possible synchronization in C code

// Pointer checking for cgo code.

// We want to detect all cases where a program that does not use
// unsafe makes a cgo call passing a Go pointer to memory that
// contains an unpinned Go pointer. Here a Go pointer is defined as a
// pointer to memory allocated by the Go runtime. Programs that use
// unsafe can evade this restriction easily, so we don't try to catch
// them. The cgo program will rewrite all possibly bad pointer
// arguments to call cgoCheckPointer, where we can catch cases of a Go
// pointer pointing to an unpinned Go pointer.

// Complicating matters, taking the address of a slice or array
// element permits the C program to access all elements of the slice
// or array. In that case we will see a pointer to a single element,
// but we need to check the entire data structure.

// The cgoCheckPointer call takes additional arguments indicating that
// it was called on an address expression. An additional argument of
// true means that it only needs to check a single element. An
// additional argument of a slice or array means that it needs to
// check the entire slice/array, but nothing else. Otherwise, the
// pointer could be anything, and we check the entire heap object,
// which is conservative but safe.

// When and if we implement a moving garbage collector,
// cgoCheckPointer will pin the pointer for the duration of the cgo
// call. (This is necessary but not sufficient; the cgo program will
// also have to change to pin Go pointers that cannot point to Go
// pointers.)
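
// As a purely illustrative example of what this catches (take and header are
// hypothetical names): with the default GODEBUG setting cgocheck=1, the first
// call below panics, because the Go struct handed to C itself contains a
// pointer to unpinned Go memory. Pinning the inner allocation with
// runtime.Pinner makes the call legal:
//
//	type header struct{ data *byte }
//
//	h := &header{data: new(byte)}
//	C.take(unsafe.Pointer(h)) // panics: Go pointer to unpinned Go pointer
//
//	var pin runtime.Pinner
//	pin.Pin(h.data)
//	C.take(unsafe.Pointer(h)) // OK while the pin is held
//	pin.Unpin()
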
// cgoCheckPointer checks if the argument contains a Go pointer that
// points to an unpinned Go pointer, and panics if it does.
func cgoCheckPointer(ptr any, arg any) {
	if !goexperiment.CgoCheck2 && debug.cgocheck == 0 {
		return
	}

	ep := efaceOf(&ptr)
	t := ep._type

	top := true
	if arg != nil && (t.Kind() == abi.Pointer || t.Kind() == abi.UnsafePointer) {
		p := ep.data
		if !t.IsDirectIface() {
			p = *(*unsafe.Pointer)(p)
		}
		if p == nil || !cgoIsGoPointer(p) {
			return
		}
		aep := efaceOf(&arg)
		switch aep._type.Kind() {
		case abi.Bool:
			if t.Kind() == abi.UnsafePointer {
				// We don't know the type of the element.
				break
			}
			pt := (*ptrtype)(unsafe.Pointer(t))
			cgoCheckArg(pt.Elem, p, true, false, cgoCheckPointerFail)
			return
		case abi.Slice:
			// Check the slice rather than the pointer.
			ep = aep
			t = ep._type
		case abi.Array:
			// Check the array rather than the pointer.
			// Pass top as false since we have a pointer
			// to the array.
			ep = aep
			t = ep._type
			top = false
		case abi.Pointer:
			// The Go code is indexing into a pointer to an array,
			// and we have been passed the pointer-to-array.
			// Check the array rather than the pointer.
			pt := (*abi.PtrType)(unsafe.Pointer(aep._type))
			t = pt.Elem
			if t.Kind() != abi.Array {
				throw("can't happen")
			}
			ep = aep
			top = false
		default:
			throw("can't happen")
		}
	}

	cgoCheckArg(t, ep.data, !t.IsDirectIface(), top, cgoCheckPointerFail)
}

type cgoErrorMsg int

const (
	cgoCheckPointerFail cgoErrorMsg = iota
	cgoResultFail
)

// cgoCheckArg is the real work of cgoCheckPointer and cgoCheckResult.
// The argument p is either a pointer to the value (of type t), or the value
// itself, depending on indir. The top parameter is whether we are at the top
// level, where Go pointers are allowed. Go pointers to pinned objects are
// allowed as long as they don't reference other unpinned pointers.
func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg cgoErrorMsg) {
	if !t.Pointers() || p == nil {
		// If the type has no pointers there is nothing to do.
		return
	}

	switch t.Kind() {
	default:
		throw("can't happen")
	case abi.Array:
		at := (*arraytype)(unsafe.Pointer(t))
		if !indir {
			if at.Len != 1 {
				throw("can't happen")
			}
			cgoCheckArg(at.Elem, p, !at.Elem.IsDirectIface(), top, msg)
			return
		}
		for i := uintptr(0); i < at.Len; i++ {
			cgoCheckArg(at.Elem, p, true, top, msg)
			p = add(p, at.Elem.Size_)
		}
	case abi.Chan, abi.Map:
		// These types contain internal pointers that will
		// always be allocated in the Go heap. It's never OK
		// to pass them to C.
		panic(cgoFormatErr(msg, t.Kind()))
	case abi.Func:
		if indir {
			p = *(*unsafe.Pointer)(p)
		}
		if !cgoIsGoPointer(p) {
			return
		}
		panic(cgoFormatErr(msg, t.Kind()))
	case abi.Interface:
		it := *(**_type)(p)
		if it == nil {
			return
		}
		// A type known at compile time is OK since it's
		// constant. A type not known at compile time will be
		// in the heap and will not be OK.
		if inheap(uintptr(unsafe.Pointer(it))) {
			panic(cgoFormatErr(msg, t.Kind()))
		}
		p = *(*unsafe.Pointer)(add(p, goarch.PtrSize))
		if !cgoIsGoPointer(p) {
			return
		}
		if !top && !isPinned(p) {
			panic(cgoFormatErr(msg, t.Kind()))
		}
		cgoCheckArg(it, p, !it.IsDirectIface(), false, msg)
	case abi.Slice:
		st := (*slicetype)(unsafe.Pointer(t))
		s := (*slice)(p)
		p = s.array
		if p == nil || !cgoIsGoPointer(p) {
			return
		}
		if !top && !isPinned(p) {
			panic(cgoFormatErr(msg, t.Kind()))
		}
		if !st.Elem.Pointers() {
			return
		}
		for i := 0; i < s.cap; i++ {
			cgoCheckArg(st.Elem, p, true, false, msg)
			p = add(p, st.Elem.Size_)
		}
	case abi.String:
		ss := (*stringStruct)(p)
		if !cgoIsGoPointer(ss.str) {
			return
		}
		if !top && !isPinned(ss.str) {
			panic(cgoFormatErr(msg, t.Kind()))
		}
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t))
		if !indir {
			if len(st.Fields) != 1 {
				throw("can't happen")
			}
			cgoCheckArg(st.Fields[0].Typ, p, !st.Fields[0].Typ.IsDirectIface(), top, msg)
			return
		}
		for _, f := range st.Fields {
			if !f.Typ.Pointers() {
				continue
			}
			cgoCheckArg(f.Typ, add(p, f.Offset), true, top, msg)
		}
	case abi.Pointer, abi.UnsafePointer:
		if indir {
			p = *(*unsafe.Pointer)(p)
			if p == nil {
				return
			}
		}

		if !cgoIsGoPointer(p) {
			return
		}
		if !top && !isPinned(p) {
			panic(cgoFormatErr(msg, t.Kind()))
		}

		cgoCheckUnknownPointer(p, msg)
	}
}

// cgoCheckUnknownPointer is called for an arbitrary pointer into Go
// memory. It checks whether that Go memory contains any other
// pointer into unpinned Go memory. If it does, we panic.
// The return values are unused but useful to see in panic tracebacks.
func cgoCheckUnknownPointer(p unsafe.Pointer, msg cgoErrorMsg) (base, i uintptr) {
	if inheap(uintptr(p)) {
		b, span, _ := findObject(uintptr(p), 0, 0)
		base = b
		if base == 0 {
			return
		}
		tp := span.typePointersOfUnchecked(base)
		for {
			var addr uintptr
			if tp, addr = tp.next(base + span.elemsize); addr == 0 {
				break
			}
			pp := *(*unsafe.Pointer)(unsafe.Pointer(addr))
			if cgoIsGoPointer(pp) && !isPinned(pp) {
				panic(cgoFormatErr(msg, abi.Pointer))
			}
		}
		return
	}

	for _, datap := range activeModules() {
		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
			// We have no way to know the size of the object.
			// We have to assume that it might contain a pointer.
			panic(cgoFormatErr(msg, abi.Pointer))
		}
		// In the text or noptr sections, we know that the
		// pointer does not point to a Go pointer.
	}

	return
}

// cgoIsGoPointer reports whether the pointer is a Go pointer--a
// pointer to Go memory. We only care about Go memory that might
// contain pointers.
//
//go:nosplit
//go:nowritebarrierrec
func cgoIsGoPointer(p unsafe.Pointer) bool {
	if p == nil {
		return false
	}

	if inHeapOrStack(uintptr(p)) {
		return true
	}

	for _, datap := range activeModules() {
		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
			return true
		}
	}

	return false
}

// cgoInRange reports whether p is between start and end.
//
//go:nosplit
//go:nowritebarrierrec
func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {
	return start <= uintptr(p) && uintptr(p) < end
}

// cgoCheckResult is called to check the result parameter of an
// exported Go function. It panics if the result is or contains any
// pointer into unpinned Go memory.
func cgoCheckResult(val any) {
	if !goexperiment.CgoCheck2 && debug.cgocheck == 0 {
		return
	}

	ep := efaceOf(&val)
	t := ep._type
	if t == nil {
		return
	}
	cgoCheckArg(t, ep.data, !t.IsDirectIface(), false, cgoResultFail)
}
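
// Purely illustrative (GoBuf and its C caller are hypothetical): with the
// default cgocheck=1, an exported function whose result is an unpinned Go
// pointer trips this check, since C must not be handed references to
// unpinned Go memory. Pinning the result with runtime.Pinner before
// returning it (and unpinning once C is done with it) avoids the panic.
//
//	//export GoBuf
//	func GoBuf() *byte {
//		return new(byte) // cgoCheckResult panics here
//	}
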
// cgoFormatErr is called by cgoCheckArg and cgoCheckUnknownPointer
// to format panic error messages.
func cgoFormatErr(error cgoErrorMsg, kind abi.Kind) errorString {
	var msg, kindname string
	var cgoFunction string = "unknown"
	var offset int
	var buf [20]byte

	// We expect one of these abi.Kind values from cgoCheckArg.
	switch kind {
	case abi.Chan:
		kindname = "channel"
	case abi.Func:
		kindname = "function"
	case abi.Interface:
		kindname = "interface"
	case abi.Map:
		kindname = "map"
	case abi.Pointer:
		kindname = "pointer"
	case abi.Slice:
		kindname = "slice"
	case abi.String:
		kindname = "string"
	case abi.Struct:
		kindname = "struct"
	case abi.UnsafePointer:
		kindname = "unsafe pointer"
	default:
		kindname = "pointer"
	}

	// For cgoResultFail, the caller's name carries a "_cgoexp_<hash>_"
	// prefix (21 bytes, as in the example below); record the offset
	// needed to strip it and recover the Go function name.
	if error == cgoResultFail {
		offset = 21
	}

	// Relative to cgoFormatErr, this is the call stack:
	// 0. cgoFormatErr
	// 1. cgoCheckArg or cgoCheckUnknownPointer
	// 2. cgoCheckPointer or cgoCheckResult
	// 3. cgo function
	pc, path, line, ok := Caller(3)
	if ok && error == cgoResultFail {
		function := FuncForPC(pc)

		if function != nil {
			// Expected format of the cgo function name:
			// - caller: _cgoexp_3c910ddb72c4_foo
			if offset > len(function.Name()) {
				cgoFunction = function.Name()
			} else {
				cgoFunction = function.Name()[offset:]
			}
		}
	}

	switch error {
	case cgoResultFail:
		msg = path + ":" + string(itoa(buf[:], uint64(line)))
		msg += ": result of Go function " + cgoFunction + " called from cgo"
		msg += " is unpinned Go " + kindname + " or points to unpinned Go " + kindname
	case cgoCheckPointerFail:
		msg += "argument of cgo function has Go pointer to unpinned Go " + kindname
	}

	return errorString(msg)
}