1. Synchronous execution: dispatch_sync
1.1 The cause of the deadlock
We all know that calling dispatch_sync on a serial queue, from a task that is already running on that same queue, makes the dispatch_sync block and the task it was submitted from wait on each other, which ends in a deadlock crash. Let's start from this problem, work out why the deadlock happens, and use it to understand how synchronous execution works. A minimal reproduction is shown below.
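As a quick illustration, here is a minimal program (not from libdispatch; the queue label is made up) that reproduces the crash: the outer block already owns the serial queue, and the inner dispatch_sync waits for that same queue to drain.
// Minimal deadlock reproduction: dispatch_sync onto the serial queue we are
// already running on. The inner call waits for the queue, and the queue waits
// for the outer block to finish -- mutual waiting.
#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void) {
    dispatch_queue_t serial =
            dispatch_queue_create("com.example.serial", DISPATCH_QUEUE_SERIAL);
    dispatch_sync(serial, ^{
        dispatch_sync(serial, ^{ // crashes: queue already owned by this thread
            printf("never reached\n");
        });
    });
    return 0;
}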
Tracking it through the source, dispatch_sync is implemented as shown below. Since the unlikely branches rarely run and are mostly error handling, we follow the main path first, which eventually lands in _dispatch_sync_f_inline.
void
dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
{
uintptr_t dc_flags = DC_FLAG_BLOCK;
if (unlikely(_dispatch_block_has_private_data(work))) {
return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
}
_dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}
-----------------------------------------------------------------------------------------
static void
_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
uintptr_t dc_flags)
{
_dispatch_sync_f_inline(dq, ctxt, func, dc_flags);
}
-----------------------------------------------------------------------------------------
Inside _dispatch_sync_f_inline there is a check, likely(dq->dq_width == 1). From the earlier look at how queues are built we know that a serial queue's width is 1, so the serial path continues in _dispatch_barrier_sync_f.
The function name also tells us that _dispatch_barrier_sync_f belongs to the barrier machinery discussed earlier, so barrier functions end up in this method as well.
Since we are chasing the deadlock first, we will skip the concurrent logic further down for now.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func, uintptr_t dc_flags)
{
// serial queues take this path
if (likely(dq->dq_width == 1)) {
return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
}
if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
}
dispatch_lane_t dl = upcast(dq)._dl;
// Global concurrent queues and queues bound to non-dispatch threads
// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags);
}
if (unlikely(dq->do_targetq->do_targetq)) {
return _dispatch_sync_recurse(dl, ctxt, func, dc_flags);
}
_dispatch_introspection_sync_begin(dl);
_dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG(
_dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags)));
}
-----------------------------------------------------------------------------------------
static void
_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func, uintptr_t dc_flags)
{
_dispatch_barrier_sync_f_inline(dq, ctxt, func, dc_flags);
}
Eventually we arrive at _dispatch_barrier_sync_f_inline.
It first calls _dispatch_tid_self. Tracing the source shows this is a macro that ultimately calls _dispatch_thread_getspecific, which reads per-thread information by key (thread-specific data). Here it fetches the current thread's tid, i.e. its unique ID. (A small aside after the next code listing shows the same value obtained through the public pthread API.)
We know the deadlock comes from tasks on a serial queue waiting on each other, so this tid must be used somewhere to detect that condition, and that leads us to the _dispatch_queue_try_acquire_barrier_sync function.
#define _dispatch_tid_self() ((dispatch_tid)_dispatch_thread_port())
#define _dispatch_thread_port() ((mach_port_t)(uintptr_t)\
_dispatch_thread_getspecific(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF))
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func, uintptr_t dc_flags)
{
// get the current thread ID -- the mach thread port, read via pthread TSD
dispatch_tid tid = _dispatch_tid_self();
if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
}
dispatch_lane_t dl = upcast(dq)._dl;
// The more correct thing to do would be to merge the qos of the thread
// that just acquired the barrier lock into the queue state.
//
// However this is too expensive for the fast path, so skip doing it.
// The chosen tradeoff is that if an enqueue on a lower priority thread
// contends with this fast path, this thread may receive a useless override.
//
// Global concurrent queues and queues bound to non-dispatch threads
// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
// deadlock check
if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
DC_FLAG_BARRIER | dc_flags);
}
if (unlikely(dl->do_targetq->do_targetq)) {
return _dispatch_sync_recurse(dl, ctxt, func,
DC_FLAG_BARRIER | dc_flags);
}
_dispatch_introspection_sync_begin(dl);
_dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}
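As a hedged aside, the tid fetched above is the current thread's Mach thread port, read out of thread-specific data; a small sketch using the public pthread API (my own example, not libdispatch code) shows the same value.
// Prints the current thread's Mach thread port -- the same value libdispatch
// uses as the tid in the checks above.
#include <mach/mach.h>
#include <pthread.h>
#include <stdio.h>

int main(void) {
    mach_port_t tid = pthread_mach_thread_np(pthread_self());
    printf("current thread's mach port (tid): %u\n", tid);
    return 0;
}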
In _dispatch_queue_try_acquire_barrier_sync_and_suspend we can see that the os_atomic_rmw_loop2o loop atomically reads the queue's state from the OS layer, compares it against the expected initial value, and returns whether the barrier was acquired.
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_class_t dq, uint32_t tid)
{
return _dispatch_queue_try_acquire_barrier_sync_and_suspend(dq._dl, tid, 0);
}
-----------------------------------------------------------------------------------------
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_lane_t dq,
uint32_t tid, uint64_t suspend_count)
{
uint64_t init = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width);
uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER |
_dispatch_lock_value_from_tid(tid) |
(suspend_count * DISPATCH_QUEUE_SUSPEND_INTERVAL);
uint64_t old_state, new_state;
// atomically read state from the runtime -- queue state, current queue, thread
return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
uint64_t role = old_state & DISPATCH_QUEUE_ROLE_MASK;
if (old_state != (init | role)) {
os_atomic_rmw_loop_give_up(break);
}
new_state = value | role;
});
}
When that acquisition fails, _dispatch_sync_f_slow is executed; the crash backtrace of such a deadlock confirms this from another angle.
From its source we can see that it first builds the task's context information (a dispatch_sync_context_s), then pushes it with _dispatch_trace_item_push so that it is stored on the synchronization queue (FIFO), which is how the function eventually gets executed.
_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
dispatch_function_t func, uintptr_t top_dc_flags,
dispatch_queue_class_t dqu, uintptr_t dc_flags)
{
...
pthread_priority_t pp = _dispatch_get_priority();
struct dispatch_sync_context_s dsc = {
.dc_flags = DC_FLAG_SYNC_WAITER | dc_flags,
.dc_func = _dispatch_async_and_wait_invoke,
.dc_ctxt = &dsc,
.dc_other = top_dq,
.dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
.dc_voucher = _voucher_get(),
.dsc_func = func,
.dsc_ctxt = ctxt,
.dsc_waiter = _dispatch_tid_self(),
};
_dispatch_trace_item_push(top_dq, &dsc);
__DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);
...
}
The actual deadlock detection happens in __DISPATCH_WAIT_FOR_QUEUE__. It reads the queue's state to see whether the queue is already being drained, then _dq_state_drain_locked_by uses an XOR to compare the owner recorded in that state with the waiting thread. If the queue is owned by the very thread that is now waiting on it, the check returns YES and the deadlock crash is raised. (A worked example follows the snippet below.)
static void
__DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq)
{
// read the queue state to see whether it is already being drained
uint64_t dq_state = _dispatch_wait_prepare(dq);
if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) {
DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
"dispatch_sync called on queue "
"already owned by current thread");
}
...
}
-----------------------------------------------------------------------------------------
static inline bool
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
{ // lock_value is the queue's lock state, tid is the waiting thread's id
// ^ (XOR): identical values give 0, different values give a nonzero result
return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}
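To make the check concrete, here is a hedged, worked illustration; the mask value below is an assumption for demonstration only (in libdispatch the low bits of the lock word carry flags and the remaining bits encode the owner).
// Worked illustration of the ownership check with made-up values.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OWNER_MASK ((uint32_t)~0x3u) // assumed: ignore the low flag bits

static bool is_locked_by(uint32_t lock_value, uint32_t tid) {
    // XOR of identical values is 0, so the masked result is 0 exactly when
    // the owner encoded in lock_value is the tid doing the waiting.
    return ((lock_value ^ tid) & OWNER_MASK) == 0;
}

int main(void) {
    uint32_t tid = 0x1a04;                      // hypothetical waiting thread
    printf("%d\n", is_locked_by(0x1a04, tid));  // 1: owner == waiter -> deadlock
    printf("%d\n", is_locked_by(0x2b08, tid));  // 0: different owner -> just wait
    return 0;
}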
To summarize:
- dispatch_sync first obtains the current thread's tid
- it then reads the queue state returned by the runtime
- the owner recorded in that state is compared with the tid; if they match, the call is deadlocking and the crash is raised
1.2 Executing the block task
As for how the synchronous block itself gets executed, we keep following the dispatch_sync source from _dispatch_barrier_sync_f_inline into _dispatch_lane_barrier_sync_invoke_and_complete; looking at its implementation, the actual invocation happens in _dispatch_client_callout.
DISPATCH_NOINLINE
static void
_dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq,
void *ctxt, dispatch_function_t func DISPATCH_TRACE_ARG(void *dc))
{
_dispatch_sync_function_invoke_inline(dq, ctxt, func);
...
}
-----------------------------------------------------------------------------------------
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_sync_function_invoke_inline(dispatch_queue_class_t dq, void *ctxt,
dispatch_function_t func)
{
dispatch_thread_frame_s dtf;
_dispatch_thread_frame_push(&dtf, dq);
// f(ctxt) -- func(ctxt)
_dispatch_client_callout(ctxt, func);
_dispatch_perfmon_workitem_inc();
_dispatch_thread_frame_pop(&dtf);
}
Looking at _dispatch_client_callout, there is indeed the call f(ctxt); at this point the synchronous function's block invocation is complete.
_dispatch_client_callout(void *ctxt, dispatch_function_t f)
{
_dispatch_get_tsd_base();
void *u = _dispatch_get_unwind_tsd();
if (likely(!u)) return f(ctxt);
_dispatch_set_unwind_tsd(NULL);
f(ctxt);
_dispatch_free_unwind_tsd();
_dispatch_set_unwind_tsd(u);
}
To summarize, the synchronous block is invoked through the following call chain:
dispatch_sync
└──_dispatch_barrier_sync_f_inline
└──_dispatch_lane_barrier_sync_invoke_and_complete
└──_dispatch_sync_function_invoke_inline
└──_dispatch_client_callout
└──f(ctxt);
2. Asynchronous execution: dispatch_async
Having walked through the synchronous path, the asynchronous one is much simpler to follow.
Its source mainly calls two functions, _dispatch_continuation_init and _dispatch_continuation_async; let's look at them one at a time.
void
dispatch_async(dispatch_queue_t dq, dispatch_block_t work)
{
dispatch_continuation_t dc = _dispatch_continuation_alloc();
uintptr_t dc_flags = DC_FLAG_CONSUME;
dispatch_qos_t qos;
qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags);
_dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}
2.1 _dispatch_continuation_init
From the source we can see that this function returns a dispatch_qos_t, so its body must be initializing and assigning that value.
Through the _dispatch_Block_invoke macro we can also see that the dispatch_block_t callback passed in gets wrapped: its invoke pointer is pulled out of the block layout.
DISPATCH_ALWAYS_INLINE
static inline dispatch_qos_t
_dispatch_continuation_init(dispatch_continuation_t dc,
dispatch_queue_class_t dqu, dispatch_block_t work,
dispatch_block_flags_t flags, uintptr_t dc_flags)
{
void *ctxt = _dispatch_Block_copy(work);
dc_flags |= DC_FLAG_BLOCK | DC_FLAG_ALLOCATED;
if (unlikely(_dispatch_block_has_private_data(work))) {
dc->dc_flags = dc_flags;
dc->dc_ctxt = ctxt;
// will initialize all fields but requires dc_flags & dc_ctxt to be set
return _dispatch_continuation_init_slow(dc, dqu, flags);
}
dispatch_function_t func = _dispatch_Block_invoke(work);
if (dc_flags & DC_FLAG_CONSUME) {
func = _dispatch_call_block_and_release;
}
return _dispatch_continuation_init_f(dc, dqu, ctxt, func, flags, dc_flags);
}
-----------------------------------------------------------------------------------------
#define _dispatch_Block_invoke(bb) \
((dispatch_function_t)((struct Block_layout *)bb)->invoke)
The final packaging happens in _dispatch_continuation_init_f. The code is very simple: it is still a matter of saving things onto the continuation, including the function, its context, and the related callbacks. The point of saving them is equally simple: an asynchronous call has to invoke the block on some thread at a later, appropriate time. A stripped-down sketch of this save-now-invoke-later idea follows the source below.
DISPATCH_ALWAYS_INLINE
static inline dispatch_qos_t
_dispatch_continuation_init_f(dispatch_continuation_t dc,
dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t f,
dispatch_block_flags_t flags, uintptr_t dc_flags)
{
pthread_priority_t pp = 0;
dc->dc_flags = dc_flags | DC_FLAG_ALLOCATED;
dc->dc_func = f;
dc->dc_ctxt = ctxt;
// in this context DISPATCH_BLOCK_HAS_PRIORITY means that the priority
// should not be propagated, only taken from the handler if it has one
if (!(flags & DISPATCH_BLOCK_HAS_PRIORITY)) {
pp = _dispatch_priority_propagate();
}
_dispatch_continuation_voucher_set(dc, flags);
return _dispatch_continuation_priority_set(dc, dqu, pp, flags);
}
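To make the "save now, invoke later" idea concrete, here is a stripped-down sketch of my own (not libdispatch code; the struct and field names only mimic dc_func/dc_ctxt): the callback and its context are stored on a heap object and invoked later, which is exactly the eventual f(ctxt).
// A toy continuation: store the function pointer and its context now,
// invoke them later on whatever thread picks the item up.
#include <dispatch/dispatch.h> // for dispatch_function_t
#include <stdio.h>
#include <stdlib.h>

typedef struct my_continuation_s {
    dispatch_function_t dc_func; // saved callback, like dc->dc_func
    void *dc_ctxt;               // saved context, like dc->dc_ctxt
} my_continuation_s;

static void work(void *ctxt) {
    printf("running saved task: %s\n", (const char *)ctxt);
}

int main(void) {
    my_continuation_s *dc = malloc(sizeof(*dc));
    dc->dc_func = work;
    dc->dc_ctxt = "hello";
    // ... later, when the queue gets to this item ...
    dc->dc_func(dc->dc_ctxt); // the eventual f(ctxt) call
    free(dc);
    return 0;
}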
2.2 _dispatch_continuation_async
Now that the previous step has packaged everything up, the important questions for asynchronous execution are when the thread gets created and when the function runs, and that is handled in this method.
Its implementation looks trivially simple, but the simpler it looks, the more is hidden underneath. It mainly calls dx_push, which turns out to be a macro that forwards to the queue's dq_push.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_async(dispatch_queue_class_t dqu,
dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags)
{
#if DISPATCH_INTROSPECTION
if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
_dispatch_trace_item_push(dqu, dc);
}
#else
(void)dc_flags;
#endif
return dx_push(dqu._dq, dc, qos);
}
-----------------------------------------------------------------------------------------
#define dx_push(x, y, z) dx_vtable(x)->dq_push(x, y, z)
So how does dq_push get assigned? Since it is a vtable field, we can search for .dq_push to find the assignments. There are many of them, but the general idea is clear: it is invoked whenever something is pushed onto a root queue, a custom queue, the main queue, and so on.
Since threads are generally created on the root queues, we go straight to the root queue's dq_push assignment, which is the quickest route; the other paths eventually end up here as well.
We find that _dispatch_root_queue_push eventually calls _dispatch_root_queue_push_inline, which in turn calls _dispatch_root_queue_poke.
_dispatch_root_queue_poke mostly performs some error-tolerance checks and finally reaches _dispatch_root_queue_poke_slow.
void
_dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou,
dispatch_qos_t qos)
{
#if DISPATCH_USE_KEVENT_WORKQUEUE
dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
if (unlikely(ddi && ddi->ddi_can_stash))
... (some less important handling elided) ...
#endif
#if HAVE_PTHREAD_WORKQUEUE_QOS
if (_dispatch_root_queue_push_needs_override(rq, qos)) {
return _dispatch_root_queue_push_override(rq, dou, qos);
}
#else
(void)qos;
#endif
_dispatch_root_queue_push_inline(rq, dou, dou, 1);
}
-----------------------------------------------------------------------------------------
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_root_queue_push_inline(dispatch_queue_global_t dq,
dispatch_object_t _head, dispatch_object_t _tail, int n)
{
struct dispatch_object_s *hd = _head._do, *tl = _tail._do;
if (unlikely(os_mpsc_push_list(os_mpsc(dq, dq_items), hd, tl, do_next))) {
return _dispatch_root_queue_poke(dq, n, 0);
}
}
-----------------------------------------------------------------------------------------
void
_dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor)
{
if (!_dispatch_queue_class_probe(dq)) {
return;
}
#if !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_POOL
if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE))
#endif
{
if (unlikely(!os_atomic_cmpxchg2o(dq, dgq_pending, 0, n, relaxed))) {
_dispatch_root_queue_debug("worker thread request still pending "
"for global queue: %p", dq);
return;
}
}
#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE
return _dispatch_root_queue_poke_slow(dq, n, floor);
}
2.3 _dispatch_root_queue_poke_slow
This method is the heart of asynchronous execution, and it is where threads get created. The code is fairly long, so we focus on the key points.
DISPATCH_NOINLINE
static void
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
{
...
// queue initialization, runtime casts, etc., guarding against mismatched types
_dispatch_root_queues_init();
_dispatch_debug_root_queue(dq, __func__);
_dispatch_trace_runtime_event(worker_request, dq, (uint64_t)n);
...
int can_request, t_count;
// seq_cst with atomic store to tail <rdar://problem/16932833>
// read the thread pool size
t_count = os_atomic_load2o(dq, dgq_thread_pool_size, ordered);
do {
// compute how many threads may still be requested
can_request = t_count < floor ? 0 : t_count - floor;
if (remaining > can_request) {
_dispatch_root_queue_debug("pthread pool reducing request from %d to %d",
remaining, can_request);
os_atomic_sub2o(dq, dgq_pending, remaining - can_request, relaxed);
remaining = can_request;
}
if (remaining == 0) {
// nothing left in the pool; log it and return
_dispatch_root_queue_debug("pthread pool is full for root queue: "
"%p", dq);
return;
}
} while (!os_atomic_cmpxchgvw2o(dq, dgq_thread_pool_size, t_count,
t_count - remaining, &t_count, acquire));
pthread_attr_t *attr = &pqc->dpq_thread_attr;
pthread_t tid, *pthr = &tid;
#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
if (unlikely(dq == &_dispatch_mgr_root_queue)) {
pthr = _dispatch_mgr_root_queue_init();
}
#endif
do {
_dispatch_retain(dq); // released in _dispatch_worker_thread
// create the worker thread
while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) {
if (r != EAGAIN) {
(void)dispatch_assume_zero(r);
}
_dispatch_temporary_resource_shortage();
}
} while (--remaining);
#else
(void)floor;
#endif // DISPATCH_USE_PTHREAD_POOL
}
-----------------------------------------------------------------------------------------
#define _dispatch_trace_runtime_event(evt, ptr, value) \
_dispatch_introspection_runtime_event(\
dispatch_introspection_runtime_event_##evt, ptr, value)
From the code we can see that the runtime reads the total pool size and the number of threads it is still allowed to create, then uses the two do-while loops to spawn worker threads on demand.
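A hedged sketch of that accounting with made-up numbers (my own illustration, not libdispatch code): only the surplus over floor may be turned into new workers, and the request is clamped to what the pool can still provide.
// Toy version of the pool accounting in _dispatch_root_queue_poke_slow.
#include <stdio.h>

int main(void) {
    int t_count   = 255; // hypothetical dgq_thread_pool_size
    int floor     = 0;   // threads that must stay in reserve
    int remaining = 3;   // workers this poke would like to request

    int can_request = t_count < floor ? 0 : t_count - floor;
    if (remaining > can_request) remaining = can_request; // clamp the request
    if (remaining == 0) {
        printf("pthread pool is full for this root queue\n");
    } else {
        printf("spawn %d worker(s); pool size drops to %d\n",
               remaining, t_count - remaining);
    }
    return 0;
}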
3. One-time execution: dispatch_once
Following dispatch_once down through its underlying calls, we find that it ends up in dispatch_once_f. The relevant code is below, and a usage sketch follows the source.
- First, val starts out as NULL and is cast to a dispatch_once_gate_t
- Looking at the _dispatch_once_gate_tryenter source, it atomically checks whether l->dgo_once is still in the DLOCK_ONCE_UNLOCKED state
- If so, _dispatch_once_callout runs the corresponding block and then sets l->dgo_once to DLOCK_ONCE_DONE, which guarantees the block executes only once
DISPATCH_NOINLINE
void
dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
{
// if this has already run once, it will not run again
dispatch_once_gate_t l = (dispatch_once_gate_t)val;
//DLOCK_ONCE_DONE
#if !DISPATCH_ONCE_INLINE_FASTPATH || DISPATCH_ONCE_USE_QUIESCENT_COUNTER
uintptr_t v = os_atomic_load(&l->dgo_once, acquire);
if (likely(v == DLOCK_ONCE_DONE)) {
return;
}
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
if (likely(DISPATCH_ONCE_IS_GEN(v))) {
return _dispatch_once_mark_done_if_quiesced(l, v);
}
#endif
#endif
// not done yet -- try to enter the gate
if (_dispatch_once_gate_tryenter(l)) {
// run the once block -- then v becomes DLOCK_ONCE_DONE
return _dispatch_once_callout(l, ctxt, func);
}
return _dispatch_once_wait(l);
}
-----------------------------------------------------------------------------------------
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_once_gate_tryenter(dispatch_once_gate_t l)
{
// has the atomic slot been taken before?
// swap it from unlocked to locked-by-the-current-thread
return os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED,
(uintptr_t)_dispatch_lock_value_for_self(), relaxed);
}
-----------------------------------------------------------------------------------------
_dispatch_once_callout(dispatch_once_gate_t l, void *ctxt,
dispatch_function_t func)
{
// block()
_dispatch_client_callout(ctxt, func);
_dispatch_once_gate_broadcast(l);
}
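For reference, a minimal usage sketch (my own example): the static token plays the role of the val handed to dispatch_once_f, so the guarded block runs exactly once no matter how many times it is reached.
// The block prints a single time even though the loop reaches it three times.
#include <dispatch/dispatch.h>
#include <stdio.h>

static dispatch_once_t onceToken; // the "val" passed down to dispatch_once_f

int main(void) {
    for (int i = 0; i < 3; i++) {
        dispatch_once(&onceToken, ^{
            printf("initialized exactly once\n");
        });
    }
    return 0;
}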
4. Semaphores: dispatch_semaphore
4.1 dispatch_semaphore_create
This method is straightforward: the value is saved into a dispatch_semaphore_t object, and all subsequent semaphore operations work on that object.
dispatch_semaphore_t
dispatch_semaphore_create(long value)
{
dispatch_semaphore_t dsema;
// a value below 0 is invalid input; return DISPATCH_BAD_INPUT immediately
if (value < 0) {
return DISPATCH_BAD_INPUT;
}
dsema = _dispatch_object_alloc(DISPATCH_VTABLE(semaphore),
sizeof(struct dispatch_semaphore_s));
dsema->do_next = DISPATCH_OBJECT_LISTLESS;
dsema->do_targetq = _dispatch_get_default_queue(false);
dsema->dsema_value = value;
_dispatch_sema4_init(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
dsema->dsema_orig = value;
return dsema;
}
4.2 dispatch_semaphore_wait
The wait function does three things:
- it calls the os_atomic_dec2o macro, which is simply an atomic -1 on the semaphore's value (dsema_value)
- if the resulting value is >= 0, there is nothing to block on and it returns immediately
- otherwise it calls _dispatch_semaphore_wait_slow, which, as the source shows, handles the different timeout values separately
long
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
long value = os_atomic_dec2o(dsema, dsema_value, acquire);
if (likely(value >= 0)) {
return 0;
}
return _dispatch_semaphore_wait_slow(dsema, timeout);
}
#define os_atomic_dec2o(p, f, m) \
os_atomic_sub2o(p, f, 1, m)
#define os_atomic_sub2o(p, f, v, m) \
os_atomic_sub(&(p)->f, (v), m)
#define os_atomic_sub(p, v, m) \
_os_atomic_c11_op((p), (v), m, sub, -)
_dispatch_semaphore_wait_slow handles the timeout as follows:
- default: calls _dispatch_sema4_timedwait, which checks whether the wait has exceeded the specified timeout.
- DISPATCH_TIME_NOW: the while loop is guaranteed to run here, because if the value were not negative the fast path would already have returned. The os_atomic_cmpxchgvw2o adds 1 back to the value, undoing the decrement, breaks out of the wait, and returns _DSEMA4_TIMEOUT to report a timeout.
- DISPATCH_TIME_FOREVER: calls _dispatch_sema4_wait, which blocks until a signal raises the value again, at which point the wait is released.
DISPATCH_NOINLINE
static long
_dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema,
dispatch_time_t timeout)
{
long orig;
_dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
switch (timeout) {
default:
if (!_dispatch_sema4_timedwait(&dsema->dsema_sema, timeout)) {
break;
}
// Fall through and try to undo what the fast path did to
// dsema->dsema_value
case DISPATCH_TIME_NOW:
orig = dsema->dsema_value;
while (orig < 0) {
if (os_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1,
&orig, relaxed)) {
return _DSEMA4_TIMEOUT();
}
}
// Another thread called semaphore_signal().
// Fall through and drain the wakeup.
case DISPATCH_TIME_FOREVER:
_dispatch_sema4_wait(&dsema->dsema_sema);
break;
}
return 0;
}
4.3 dispatch_semaphore_signal
Having understood wait, signal is easy: the os_atomic_inc2o macro atomically does +1 on the semaphore's value, and if the result is greater than 0 execution simply continues. A small usage sketch tying wait and signal together follows the source below.
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
// value + 1, e.g. 0 + 1 = 1
long value = os_atomic_inc2o(dsema, dsema_value, release);
if (likely(value > 0)) {
return 0;
}
if (unlikely(value == LONG_MIN)) {
DISPATCH_CLIENT_CRASH(value,
"Unbalanced call to dispatch_semaphore_signal()");
}
return _dispatch_semaphore_signal_slow(dsema);
}
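Tying wait and signal together, here is a minimal usage sketch (my own example; the queue choice is illustrative): with an initial value of 0 the waiter blocks until the background block signals.
// The main thread blocks in wait until the background block calls signal.
#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void) {
    dispatch_semaphore_t sema = dispatch_semaphore_create(0);

    dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
        printf("background work finished\n");
        dispatch_semaphore_signal(sema); // +1; wakes the waiter if it is blocked
    });

    // -1; with an initial value of 0 this goes negative, so the caller blocks
    // until the signal above arrives (DISPATCH_TIME_FOREVER: no timeout).
    dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
    printf("resumed after signal\n");
    return 0;
}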
To sum up how semaphores work underneath:
A semaphore is created with an initial value, which is then saved internally; in practice two values are kept, the current value and a record of the original one. wait and signal are inverse operations: wait does -1 and signal does +1. The initial value must be greater than or equal to 0; if the value is at 0 (or below) and wait is then called, the thread blocks until another thread calls signal.
5. Dispatch groups: dispatch_group
The functions behind dispatch_group follow the same idea as the semaphore: a value is maintained internally, entering and leaving the group adjust that value, and when it comes back to the zero threshold the follow-up work is carried out.
5.1 dispatch_group_create
Much like the semaphore, creating a group saves the state into a dispatch_group_t and, via the os_atomic_store2o macro, maintains an internal value.
dispatch_group_t
dispatch_group_create(void)
{
return _dispatch_group_create_with_count(0);
}
-----------------------------------------------------------------------------------------
DISPATCH_ALWAYS_INLINE
static inline dispatch_group_t
_dispatch_group_create_with_count(uint32_t n)
{
dispatch_group_t dg = _dispatch_object_alloc(DISPATCH_VTABLE(group),
sizeof(struct dispatch_group_s));
dg->do_next = DISPATCH_OBJECT_LISTLESS;
dg->do_targetq = _dispatch_get_default_queue(false);
if (n) {
os_atomic_store2o(dg, dg_bits,
-n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed);
os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // <rdar://22318411>
}
return dg;
}
5.2 dispatch_group_enter
From the source, entering the group first uses the os_atomic_sub_orig2o macro to atomically decrement dg_bits by one DISPATCH_GROUP_VALUE_INTERVAL (conceptually a -1 on the group's count), then masks with & DISPATCH_GROUP_VALUE_MASK to obtain the real value.
void
dispatch_group_enter(dispatch_group_t dg)
{
// The value is decremented on a 32bits wide atomic so that the carry
// for the 0 -> -1 transition is not propagated to the upper 32bits.
uint32_t old_bits = os_atomic_sub_orig2o(dg, dg_bits,
DISPATCH_GROUP_VALUE_INTERVAL, acquire);
uint32_t old_value = old_bits & DISPATCH_GROUP_VALUE_MASK;
if (unlikely(old_value == 0)) {
_dispatch_retain(dg); // <rdar://problem/22318411>
}
if (unlikely(old_value == DISPATCH_GROUP_VALUE_MAX)) {
DISPATCH_CLIENT_CRASH(old_bits,
"Too many nested calls to dispatch_group_enter()");
}
}
5.3 dispatch_group_leave
Leaving the group atomically adds one DISPATCH_GROUP_VALUE_INTERVAL to the state via os_atomic_add_orig2o (conceptually a +1) and again masks with & DISPATCH_GROUP_VALUE_MASK to get the real value. If this leave balances the last outstanding enter (the old value equals DISPATCH_GROUP_VALUE_1), _dispatch_group_wake is called to carry out the follow-up work.
void
dispatch_group_leave(dispatch_group_t dg)
{
// The value is incremented on a 64bits wide atomic so that the carry for
// the -1 -> 0 transition increments the generation atomically.
uint64_t new_state, old_state = os_atomic_add_orig2o(dg, dg_state,
DISPATCH_GROUP_VALUE_INTERVAL, release);
uint32_t old_value = (uint32_t)(old_state & DISPATCH_GROUP_VALUE_MASK);
if (unlikely(old_value == DISPATCH_GROUP_VALUE_1)) {
old_state += DISPATCH_GROUP_VALUE_INTERVAL;
do {
new_state = old_state;
if ((old_state & DISPATCH_GROUP_VALUE_MASK) == 0) {
new_state &= ~DISPATCH_GROUP_HAS_WAITERS;
new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
} else {
// If the group was entered again since the atomic_add above,
// we can't clear the waiters bit anymore as we don't know for
// which generation the waiters are for
new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
}
if (old_state == new_state) break;
} while (unlikely(!os_atomic_cmpxchgv2o(dg, dg_state,
old_state, new_state, &old_state, relaxed)));
return _dispatch_group_wake(dg, old_state, true);
}
if (unlikely(old_value == 0)) {
DISPATCH_CLIENT_CRASH((uintptr_t)old_value,
"Unbalanced call to dispatch_group_leave()");
}
}
5.4 dispatch_group_async
dispatch_group_async is simply a wrapper around enter and leave. The code shows that, much like plain dispatch_async, it wraps and saves the block; the group is entered before the continuation is submitted, and dispatch_group_leave is called after the block has run. A usage sketch follows the source below.
void
dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
dispatch_block_t db)
{
dispatch_continuation_t dc = _dispatch_continuation_alloc();
uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_GROUP_ASYNC;
dispatch_qos_t qos;
// save the task
qos = _dispatch_continuation_init(dc, dq, db, 0, dc_flags);
_dispatch_continuation_group_async(dg, dq, dc, qos);
}
static inline void
_dispatch_continuation_group_async(dispatch_group_t dg, dispatch_queue_t dq,
dispatch_continuation_t dc, dispatch_qos_t qos)
{ // enter the group
dispatch_group_enter(dg);
dc->dc_data = dg;
_dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}
static inline void
_dispatch_continuation_with_group_invoke(dispatch_continuation_t dc)
{
struct dispatch_object_s *dou = dc->dc_data;
unsigned long type = dx_type(dou);
if (type == DISPATCH_GROUP_TYPE) {
_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
_dispatch_trace_item_complete(dc);
// leave the group
dispatch_group_leave((dispatch_group_t)dou);
} else {
DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type");
}
}
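For reference, a minimal usage sketch (my own example; queue and label choices are illustrative): dispatch_group_async behaves like an enter/leave pair wrapped around the block, and notify fires once the internal count returns to its initial value.
// Two tasks join the group -- one via group_async, one via manual enter/leave;
// the notify block runs after both have left.
#include <dispatch/dispatch.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    dispatch_group_t group = dispatch_group_create();

    // Packaged form: enter happens before submission, leave after the block runs.
    dispatch_group_async(group, q, ^{ printf("task 1\n"); });

    // Manual equivalent of the wrapper above.
    dispatch_group_enter(group);
    dispatch_async(q, ^{
        printf("task 2\n");
        dispatch_group_leave(group);
    });

    dispatch_group_notify(group, dispatch_get_main_queue(), ^{
        printf("all tasks have left the group\n");
        exit(0);
    });
    dispatch_main(); // park the main thread so the main-queue notify block can run
}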
5.5 dispatch_group_notify
From the source we can see that os_atomic_rmw_loop2o is used to read the group's state from the runtime, and once the count is already back at zero it ends up calling _dispatch_group_wake.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
dispatch_continuation_t dsn)
{
uint64_t old_state, new_state;
dispatch_continuation_t prev;
dsn->dc_data = dq;
_dispatch_retain(dq);
prev = os_mpsc_push_update_tail(os_mpsc(dg, dg_notify), dsn, do_next);
if (os_mpsc_push_was_empty(prev)) _dispatch_retain(dg);
os_mpsc_push_update_prev(os_mpsc(dg, dg_notify), prev, dsn, do_next);
if (os_mpsc_push_was_empty(prev)) {
os_atomic_rmw_loop2o(dg, dg_state, old_state, new_state, release, {
new_state = old_state | DISPATCH_GROUP_HAS_NOTIFS;
if ((uint32_t)old_state == 0) {
os_atomic_rmw_loop_give_up({
return _dispatch_group_wake(dg, new_state, false);
});
}
});
}
}
_dispatch_group_wake does two main things. It wakes up whoever is blocked in dispatch_group_wait on this group (in this version of the source via _dispatch_wake_by_address), so the wait call can return. It also takes a snapshot of the notify list and walks it, submitting each callback registered through the notify function asynchronously to its target queue.
DISPATCH_NOINLINE
static void
_dispatch_group_wake(dispatch_group_t dg, uint64_t dg_state, bool needs_release)
{
uint16_t refs = needs_release ? 1 : 0; // <rdar://problem/22318411>
if (dg_state & DISPATCH_GROUP_HAS_NOTIFS) {
dispatch_continuation_t dc, next_dc, tail;
// Snapshot before anything is notified/woken <rdar://problem/8554546>
dc = os_mpsc_capture_snapshot(os_mpsc(dg, dg_notify), &tail);
do {
dispatch_queue_t dsn_queue = (dispatch_queue_t)dc->dc_data;
next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next);
_dispatch_continuation_async(dsn_queue, dc,
_dispatch_qos_from_pp(dc->dc_priority), dc->dc_flags);
_dispatch_release(dsn_queue);
} while ((dc = next_dc));
refs++;
}
if (dg_state & DISPATCH_GROUP_HAS_WAITERS) {
_dispatch_wake_by_address(&dg->dg_gen);
}
if (refs) _dispatch_release_n(dg, refs);
}
Summary
- dispatch_sync pushes the block onto the queue, where it is executed in FIFO order.
- dispatch_sync deadlocks mainly because the tid that is blocked waiting and the tid currently draining the queue are one and the same.
- dispatch_async wraps and saves the task, then brings up an appropriate thread to execute the saved task.
- A semaphore maintains a value underneath: signal does +1 and wait does -1. If the value is greater than or equal to 0 the wait does not block; otherwise it blocks and the timeout parameter decides how long.
- dispatch_group also maintains a value underneath; waiting for the group really means waiting for that value to return to its initial state. notify chains every registered callback into a list, and when a completing task brings the value back to the initial state, the callbacks are submitted asynchronously (dispatch_async style) one after another.
- dispatch_once uses a static token to mark whether the block has already run, together with an atomic lock so that only one thread can run it; once the block finishes, all other waiting threads are woken up.