While reading the Go source, you'll notice that besides the Mutex in the sync package, the runtime package has a mutex of its own, and it is used all over the runtime.
The runtime mutex is structured as follows:
File: /runtime/runtime2.go

```go
type mutex struct {
	// Empty struct when lock ranking is disabled; otherwise it carries the lock's rank.
	lockRankStruct
	// The futex-based implementation treats this as a uint32 key;
	// the sema-based implementation treats it as an M* waitm.
	key uintptr
}
```
As you can see, it has two fields: lockRankStruct and key.
First, lockRankStruct:
By default, lockRankStruct is an empty struct. Someone online described it like this:
lockRankStruct provides a mechanism for static lock ranking in the runtime. Static lock ranking establishes a documented total order among locks, and reports an error if that total order is violated. As long as locks are acquired in the documented order, lock-ordering deadlocks cannot occur. To make the Go runtime use this mechanism you need to set GOEXPERIMENT=staticlockranking. It is off by default, in which case lockRankStruct is an empty struct and lockWithRank() is equivalent to lock().
So what does that mean? Acquired "in the documented order"? What document, and what order?
The quote reads like machine translation. What it actually says is: if you build Go with GOEXPERIMENT=staticlockranking enabled, lock acquisitions are checked against a predefined order, and any acquisition that violates that order, and could therefore deadlock, makes the runtime throw. (Inside the runtime, many errors are raised with throw rather than panic. throw is not exposed to users, and recover only catches panics, so user code has no way to handle a throw: the process simply dies.)
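You can see this unrecoverable behavior from ordinary user code. A minimal demo (my own, not from the runtime source): unlocking an unlocked sync.Mutex triggers an unrecoverable fatal error, the same class of failure as a runtime throw.

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	defer func() {
		// Never reached: fatal runtime errors bypass recover
		// entirely and terminate the whole process.
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	var mu sync.Mutex
	mu.Unlock() // fatal error: sync: unlock of unlocked mutex
}
```

Run it and the process prints the fatal error and exits; the deferred recover never fires.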
So where does this order come from?
The order is fixed in advance, committed by the Go developers themselves; the change that introduced it is at go-review.googlesource.com/c/go/+/2076...
The order lives in lockPartialOrder in runtime/lockrank.go. It is a [][]lockRank slice indexed by lock rank, effectively a map from each rank to the set of ranks that may already be held when a lock of that rank is acquired. For example, lockRankScavenge: {lockRankSysmon} means the scavenger lock may be taken while holding the sysmon lock, but not the other way around.
```go
var lockPartialOrder [][]lockRank = [][]lockRank{
lockRankSysmon: {},
lockRankScavenge: {lockRankSysmon},
lockRankForcegc: {lockRankSysmon},
lockRankDefer: {},
lockRankSweepWaiters: {},
lockRankAssistQueue: {},
lockRankSweep: {},
lockRankPollDesc: {},
lockRankCpuprof: {},
lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof},
lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched},
lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched},
lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllp, lockRankTimers},
lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllp, lockRankTimers},
lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankHchan},
lockRankNotifyList: {},
lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankHchan, lockRankNotifyList},
lockRankRwmutexW: {},
lockRankRwmutexR: {lockRankSysmon, lockRankRwmutexW},
lockRankRoot: {},
lockRankItab: {},
lockRankReflectOffs: {lockRankItab},
lockRankUserArenaState: {},
lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},
lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf},
lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive},
lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture},
lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf},
lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMheapSpecial, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
lockRankPanic: {},
lockRankDeadlock: {lockRankPanic, lockRankDeadlock},
}
```
Honestly, calling this thing a "document" is not very intuitive, but as long as you know what it refers to, don't get hung up on the name. Many names carry historical baggage or lose something in translation, and chasing down the etymology is rarely worth the time.
So how is the order verified?
The check is not complicated: it's the checkRanks function in runtime/lockrank_on.go.
```go
func checkRanks(gp *g, prevRank, rank lockRank) {
	rankOK := false
	if rank < prevRank {
		// If rank < prevRank, then we definitely have a rank error
		rankOK = false
	} else if rank == lockRankLeafRank {
		// If new lock is a leaf lock, then the preceding lock can
		// be anything except another leaf lock.
		rankOK = prevRank < lockRankLeafRank
	} else {
		// We've now verified the total lock ranking, but we
		// also enforce the partial ordering specified by
		// lockPartialOrder as well. Two locks with the same rank
		// can only be acquired at the same time if explicitly
		// listed in the lockPartialOrder table.
		list := lockPartialOrder[rank]
		for _, entry := range list {
			if entry == prevRank {
				rankOK = true
				break
			}
		}
	}
	if !rankOK {
		printlock()
		println(gp.m.procid, " ======")
		printHeldLocks(gp)
		throw("lock ordering problem")
	}
}
```
Does any of this affect you?
No. If you don't build with GOEXPERIMENT=staticlockranking you never have to think about it; without reading the source you wouldn't even know it exists. Treat it as a fun fact.
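To make the checking logic concrete, here's a standalone toy (the three ranks and all names are mine, not from the runtime) that mirrors what checkRanks does. In the real runtime, lockWithRank records each acquired lock on the M and passes the rank of the most recently held lock in as prevRank.

```go
package main

import "fmt"

type lockRank int

const (
	rankSched lockRank = iota
	rankAllg
	rankTimers
)

// Toy table in the same shape as lockPartialOrder: partialOrder[r]
// lists the ranks that may already be held when rank r is acquired.
var partialOrder = [][]lockRank{
	rankSched:  {},
	rankAllg:   {rankSched},
	rankTimers: {rankSched, rankAllg},
}

// checkRanks mirrors the runtime logic: a lower rank acquired after a
// higher one is always an error; otherwise the previously held rank
// must be listed as an allowed predecessor of the new rank.
func checkRanks(prev, next lockRank) error {
	if next < prev {
		return fmt.Errorf("rank error: %d acquired while holding %d", next, prev)
	}
	for _, r := range partialOrder[next] {
		if r == prev {
			return nil
		}
	}
	return fmt.Errorf("lock ordering problem: %d after %d", next, prev)
}

func main() {
	fmt.Println(checkRanks(rankSched, rankTimers)) // <nil>: allowed
	fmt.Println(checkRanks(rankTimers, rankAllg))  // rank error: violates the order
}
```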
Now let's look at key.
key has two implementations.
The futex implementation in runtime/lock_futex.go:
//go:build dragonfly || freebsd || linux
i.e. Dragonfly, FreeBSD, and Linux;
and the semaphore implementation in runtime/lock_sema.go:
//go:build aix || darwin || netbsd || openbsd || plan9 || solaris || windows
i.e. AIX, Darwin, NetBSD, OpenBSD, Plan 9, Solaris, and Windows.
The two implementations are broadly similar, though.
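As an aside, this per-OS selection is ordinary Go build-constraint dispatch. A minimal sketch (file and function names are mine, purely illustrative): two files in one package define the same function, and the constraint decides which file gets compiled.

```go
// lock_fast.go (hypothetical)
//go:build linux

package toylock

// Compiled only on Linux; the real runtime would use futex here.
func osLock() {}
```

```go
// lock_slow.go (hypothetical)
//go:build !linux

package toylock

// Compiled everywhere else; the real runtime would use a semaphore here.
func osLock() {}
```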
Let's walk through the Linux (futex) version, since day-to-day work mostly targets Linux anyway.
First, the lock has three states:
```go
const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2
)
```

- mutex_unlocked: the lock is not held (no one owns it)
- mutex_locked: the lock is held
- mutex_sleeping: the lock is held and some thread is blocked in futexsleep waiting for it

Note that what blocks here is the OS thread (the M), not the goroutine: the runtime mutex parks the whole thread.
Next, the locking function, lock2 (lock() is a thin wrapper that ends up here):
```go
func lock2(l *mutex) {
	// Get the goroutine currently running this code.
	gp := getg()
	// A negative lock count means something has gone badly wrong; throw.
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	// Bump the lock count on the M this G is bound to
	// (see the GMP model if this is unfamiliar).
	gp.m.locks++

	// Speculative grab: if things are quiet, the very first attempt
	// succeeds and we return immediately with the lock.
	// Xchg swaps the value unconditionally, with no compare step; it is
	// simpler than CAS (which must compare first), so in theory faster,
	// and a good fit for this "just grab it" fast path.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// Most of the time the grab fails: locks exist precisely because of
	// contention; otherwise why lock at all. If v is mutex_locked or
	// mutex_sleeping, the acquisition did not succeed. Record that old
	// state as wait: it's what we'll store into the key once we do
	// manage to grab the lock.
	wait := v

	// Spin count: if ncpu (the number of CPU cores) is greater than 1,
	// we're on a multicore machine, so it's worth spinning a bit before
	// sleeping. active_spin is 4, so 4 rounds of active spinning
	// (why exactly 4, I haven't dug into).
	spin := 0
	if ncpu > 1 {
		// spin as in "spinning" (yes, like the Gattling cannon's
		// voice line in Red Alert).
		spin = active_spin
	}
	for {
		// Try for the lock, actively spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				// CAS to grab the lock. wait is v from above on the
				// first pass, and mutex_sleeping on later passes.
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			// procyield executes the CPU's PAUSE instruction (on x86),
			// which hints to the processor that this is a spin-wait
			// loop and makes the spinning cheaper.
			procyield(active_spin_cnt)
		}

		// passive_spin is 1 ("passive" spinning): after the 4 active
		// rounds, one more chance to grab the lock, so 4 + 1 spin
		// rounds in total.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				// Same CAS as above.
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			// Yield the CPU to the OS scheduler.
			osyield()
		}

		// Flip the state to sleeping; Xchg returns the previous value.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		// If the previous value was mutex_unlocked, the lock was free,
		// so storing mutex_sleeping just acquired it.
		if v == mutex_unlocked {
			return
		}
		// From now on wait is mutex_sleeping, ready for the next loop.
		wait = mutex_sleeping
		// Sleep in the kernel via the futex syscall until woken.
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}
```
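Before moving on to unlock, a quick aside on the Xchg-versus-CAS remark in the fast path. sync/atomic exposes the same two primitives to user code, and this small demo (entirely my own) shows the difference: Swap always writes and reports what was there, while CAS can fail outright and must be retried.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var key int32 = 1 // pretend the lock is already held

	// Swap unconditionally stores the new value and returns the old one:
	// one operation both marks the lock held and tells us whether it
	// already was, which is exactly what the fast path needs.
	old := atomic.SwapInt32(&key, 1)
	fmt.Println("swap saw:", old) // 1: it was held, so we must wait

	// CAS only writes when the current value matches the expected one,
	// so grabbing a lock via CAS needs a retry loop around it.
	ok := atomic.CompareAndSwapInt32(&key, 0, 1)
	fmt.Println("cas succeeded:", ok) // false: the value was 1, not 0
}
```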
Finally, the unlocking function, unlock2:
```go
func unlock2(l *mutex) {
	// Set l.key = mutex_unlocked; v is the old value.
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	// Unlocking a lock that is already unlocked is a bug: throw.
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	// If the old state was sleeping, some thread (an M, not a
	// goroutine: this lock parks the whole OS thread) is asleep
	// waiting for the lock, so wake one up.
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	// Get the current goroutine.
	gp := getg()
	// Decrement the lock count of the M this G belongs to.
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	// If the M holds no more locks and this G's preempt flag was set
	// before the unlock, the G can be preempted now.
	if gp.m.locks == 0 && gp.preempt {
		// Setting stackguard0 to stackPreempt makes the next stack-growth
		// check fail on purpose; the runtime then notices the preempt
		// flag and performs the preemption instead of growing the stack.
		gp.stackguard0 = stackPreempt
	}
}
```
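Putting the two halves together: here's a user-space toy (all names are mine) that mirrors the shape of lock2/unlock2 with sync/atomic. runtime.Gosched stands in for futexsleep/futexwakeup, so it imitates the structure of the algorithm, not the kernel-level blocking.

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

const (
	unlocked int32 = 0
	locked   int32 = 1
	sleeping int32 = 2 // "held, with waiters"; here waiters just yield and poll
)

type toyMutex struct{ key int32 }

func (m *toyMutex) Lock() {
	// Speculative grab, as in lock2's fast path.
	v := atomic.SwapInt32(&m.key, locked)
	if v == unlocked {
		return
	}
	wait := v
	for {
		// Active spin: CAS while the lock looks free.
		for i := 0; i < 4; i++ {
			for atomic.LoadInt32(&m.key) == unlocked {
				if atomic.CompareAndSwapInt32(&m.key, unlocked, wait) {
					return
				}
			}
		}
		// Mark the lock contended; if it was actually free, we now own it.
		v = atomic.SwapInt32(&m.key, sleeping)
		if v == unlocked {
			return
		}
		wait = sleeping
		runtime.Gosched() // stand-in for futexsleep
	}
}

func (m *toyMutex) Unlock() {
	v := atomic.SwapInt32(&m.key, unlocked)
	if v == unlocked {
		panic("unlock of unlocked lock") // the runtime would throw here
	}
	// A real implementation would futexwakeup when v == sleeping;
	// our waiters poll, so there is nobody to explicitly wake.
}

func main() {
	var m toyMutex
	var n int
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				m.Lock()
				n++
				m.Unlock()
			}
		}()
	}
	wg.Wait()
	fmt.Println(n) // always 8000: the toy lock provides mutual exclusion
}
```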
And that's about it.