iOS Underlying Principles 24: Applications of GCD

I. GCD applications and the principles behind them

1. Singleton - dispatch_once_f()

The singleton code snippet:

 // singleton: _dispatch_once
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        <#code to be executed once#>
    });
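
In practice this snippet usually lives inside a class method. A typical wrapper looks like the sketch below (the class name MyManager is hypothetical, not from the original post):

// A common singleton wrapper built on dispatch_once (illustrative sketch).
@interface MyManager : NSObject
+ (instancetype)sharedInstance;
@end

@implementation MyManager
+ (instancetype)sharedInstance {
    static MyManager *instance = nil;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        instance = [[MyManager alloc] init]; // runs exactly once per process
    });
    return instance;
}
@end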

Searching the source:

#if DISPATCH_ONCE_INLINE_FASTPATH
DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead")
void
_dispatch_once(dispatch_once_t *predicate,
        DISPATCH_NOESCAPE dispatch_block_t block)
{
    if (DISPATCH_EXPECT(*predicate, ~0l) != ~0l) {
        dispatch_once(predicate, block);
    } else {
        dispatch_compiler_barrier();// compiler barrier
    }
    DISPATCH_COMPILER_CAN_ASSUME(*predicate == ~0l);
}
#undef dispatch_once
#define dispatch_once _dispatch_once
#endif
#endif // DISPATCH_ONCE_INLINE_FASTPATH

// Searching for dispatch_once( leads to:
#ifdef __BLOCKS__
void
dispatch_once(dispatch_once_t *val, dispatch_block_t block)
{
    dispatch_once_f(val, block, _dispatch_Block_invoke(block));
}
#endif

1.1. The singleton implementation - dispatch_once_f():

DISPATCH_NOINLINE
void
dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
{
    // val --> the static onceToken variable passed in from the call site
    dispatch_once_gate_t l = (dispatch_once_gate_t)val;

#if !DISPATCH_ONCE_INLINE_FASTPATH || DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    uintptr_t v = os_atomic_load(&l->dgo_once, acquire);
    if (likely(v == DLOCK_ONCE_DONE)) {
        // already marked DLOCK_ONCE_DONE: subsequent calls return right here
        return;
    }
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    if (likely(DISPATCH_ONCE_IS_GEN(v))) {
        return _dispatch_once_mark_done_if_quiesced(l, v);
    }
#endif
#endif
    if (_dispatch_once_gate_tryenter(l)) {
        return _dispatch_once_callout(l, ctxt, func);
    }
    // _dispatch_once_gate_tryenter failed: the gate is held by another thread,
    // so keep waiting (a loop inside _dispatch_once_wait) until the done state is observed
    return _dispatch_once_wait(l);
}

How does the singleton run its block only once? --> via a flag.
Source analysis:
the lock in _dispatch_once_gate_tryenter():

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_once_gate_tryenter(dispatch_once_gate_t l)
{
    /**
     compare-and-swap: if dgo_once still holds DLOCK_ONCE_UNLOCKED,
     atomically replace it with the current thread's lock value
     */
    return os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED,
            (uintptr_t)_dispatch_lock_value_for_self(), relaxed);
}

// if the returned bool is YES --> _dispatch_once_callout()
static void
_dispatch_once_callout(dispatch_once_gate_t l, void *ctxt,
        dispatch_function_t func)
{
    _dispatch_client_callout(ctxt, func);// return f(ctxt); invokes the block
    // then mark the once token as done
    _dispatch_once_gate_broadcast(l);
}

_dispatch_once_gate_broadcast():

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_once_gate_broadcast(dispatch_once_gate_t l)
{
    // the lock value for the current thread (tid-based) --> thread safety
    dispatch_lock value_self = _dispatch_lock_value_for_self();
    uintptr_t v;
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    v = _dispatch_once_mark_quiescing(l);
#else
    // return os_atomic_xchg(&dgo->dgo_once, DLOCK_ONCE_DONE, release);
    v = _dispatch_once_mark_done(l);// mark DLOCK_ONCE_DONE so the next call returns immediately
#endif
    if (likely((dispatch_lock)v == value_self)) return;
    _dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)v);
}

Singleton summary:

  • The dispatch_once flow:
    1. On the first call the gate is unlocked, so _dispatch_once_callout() runs the block via f(ctxt);
    2. _dispatch_once_gate_broadcast() then marks the once token as done (using the current thread's lock value, which keeps the whole sequence thread-safe);
    3. On every later call the token is already done and the function returns immediately.
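
To make the tryenter/done flow concrete, here is a minimal sketch of the same gate idea written with plain C11 atomics (my own simplification for illustration, not libdispatch code): the first caller swaps the gate from UNLOCKED to BUSY, runs the function, then stores DONE; every other caller either returns immediately or waits until DONE appears.

#include <stdatomic.h>

#define GATE_UNLOCKED 0UL
#define GATE_BUSY     1UL
#define GATE_DONE     (~0UL)   // plays the role of DLOCK_ONCE_DONE (~0l)

static void my_once(atomic_ulong *gate, void (*func)(void)) {
    if (atomic_load_explicit(gate, memory_order_acquire) == GATE_DONE) {
        return;                        // fast path: already initialized
    }
    unsigned long expected = GATE_UNLOCKED;
    if (atomic_compare_exchange_strong(gate, &expected, GATE_BUSY)) {
        func();                        // the _dispatch_once_callout step
        atomic_store_explicit(gate, GATE_DONE, memory_order_release);
    } else {
        // another thread owns the gate: wait until it is marked DONE
        // (libdispatch waits far more efficiently in _dispatch_once_wait)
        while (atomic_load_explicit(gate, memory_order_acquire) != GATE_DONE) { }
    }
}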

2. Barrier functions

2.1. Test code:

- (void)my_barrier {
    
    dispatch_queue_t queue01 = dispatch_queue_create("create_my_queue01", DISPATCH_QUEUE_CONCURRENT);
    
    // async task
    dispatch_async(queue01, ^{
        sleep(1);
        NSLog(@"%@ -- time-consuming task",[NSThread currentThread]);
    });
    
    
    /**
     Barrier function, sync vs. async:
     sync  - blocks the code that follows it
     async - does not block the caller
     */
    dispatch_barrier_async(queue01, ^{
        
        NSLog(@"%@ -- barrier",[NSThread currentThread]);
    });
    
//    // swap in for the synchronous barrier test:
//    dispatch_barrier_sync(queue01, ^{
//        NSLog(@"%@ -- barrier",[NSThread currentThread]);
//    });
    
    
    dispatch_async(queue01, ^{
        NSLog(@"%@ -- task after the barrier",[NSThread currentThread]);
    });
    
    NSLog(@"-- 我是主線程 --");
    
}

/** Output with the synchronous barrier (dispatch_barrier_sync)
 <NSThread: 0x600002c2c680>{number = 5, name = (null)} -- time-consuming task
 <NSThread: 0x600002c4e140>{number = 1, name = main} -- barrier
 -- main thread --
 <NSThread: 0x600002c2c680>{number = 5, name = (null)} -- task after the barrier
 */

/** Output with the asynchronous barrier (dispatch_barrier_async)
 -- main thread --
 <NSThread: 0x600003abad80>{number = 7, name = (null)} -- time-consuming task
 <NSThread: 0x600003abad80>{number = 7, name = (null)} -- barrier
 <NSThread: 0x600003abad80>{number = 7, name = (null)} -- task after the barrier
*/
  1. Synchronous barrier (dispatch_barrier_sync): blocks the current thread right where it is called until the barrier task finishes --> the effect of a synchronization lock.
  2. Asynchronous barrier (dispatch_barrier_async): does not block the current thread; it only fences the tasks in the queue the barrier was submitted to.
  • In short: an async barrier fences the queue, while a sync barrier additionally blocks the calling thread.
  • A barrier is only meaningful on a custom concurrent queue: on a serial queue it does not error out but adds nothing (the queue is already exclusive), and on a global concurrent queue it simply behaves like a normal dispatch.
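
A classic application of dispatch_barrier_async on a custom concurrent queue is a "many readers, one writer" container. A hedged sketch (SafeStore and its queue label are my own names, assuming ARC):

// Thread-safe multi-read / single-write access built on a custom concurrent
// queue plus dispatch_barrier_async (illustrative sketch).
@interface SafeStore : NSObject
- (id)objectForKey:(NSString *)key;
- (void)setObject:(id)obj forKey:(NSString *)key;
@end

@implementation SafeStore {
    dispatch_queue_t _queue;
    NSMutableDictionary *_dict;
}

- (instancetype)init {
    if ((self = [super init])) {
        _queue = dispatch_queue_create("safe.store.queue", DISPATCH_QUEUE_CONCURRENT);
        _dict  = [NSMutableDictionary dictionary];
    }
    return self;
}

// Reads run concurrently with each other.
- (id)objectForKey:(NSString *)key {
    __block id result;
    dispatch_sync(_queue, ^{ result = self->_dict[key]; });
    return result;
}

// The barrier waits for in-flight reads, writes alone, then lets reads resume.
- (void)setObject:(id)obj forKey:(NSString *)key {
    dispatch_barrier_async(_queue, ^{ self->_dict[key] = obj; });
}
@end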

2.2. Barrier functions under the hood

The synchronous function - dispatch_sync

DISPATCH_NOINLINE
void
dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
{
    uintptr_t dc_flags = DC_FLAG_BLOCK;
    if (unlikely(_dispatch_block_has_private_data(work))) {
        return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
    }
    _dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}

_dispatch_sync_f() --> _dispatch_sync_f_inline()

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
        dispatch_function_t func, uintptr_t dc_flags)
{
    if (likely(dq->dq_width == 1)) {// width == 1 --> serial queue
        // _dispatch_barrier_sync_f: the same path the barrier function takes
        return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
    }

    if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
        DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
    }

    dispatch_lane_t dl = upcast(dq)._dl;
    // Global concurrent queues and queues bound to non-dispatch threads
    // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
    /**
     Global concurrent queues and queues bound to non-dispatch threads
     always take the slow path
     */
    if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
        return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags);
    }

    if (unlikely(dq->do_targetq->do_targetq)) {
        return _dispatch_sync_recurse(dl, ctxt, func, dc_flags);
    }
    _dispatch_introspection_sync_begin(dl);
    _dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG(
            _dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags)));
}
2.2.1. _dispatch_barrier_sync_f()
1) Barrier on a serial queue

From the source above we can see that a serial queue and the barrier function go through the same code path: _dispatch_barrier_sync_f().
Continuing into the source: _dispatch_barrier_sync_f() --> _dispatch_barrier_sync_f_inline()

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
        dispatch_function_t func, uintptr_t dc_flags)
{
    dispatch_tid tid = _dispatch_tid_self();// the current thread's tid (derived from its mach port); every thread has its own tid

    if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
        DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
    }

    dispatch_lane_t dl = upcast(dq)._dl;
    // The more correct thing to do would be to merge the qos of the thread
    // that just acquired the barrier lock into the queue state.
    //
    // However this is too expensive for the fast path, so skip doing it.
    // The chosen tradeoff is that if an enqueue on a lower priority thread
    // contends with this fast path, this thread may receive a useless override.
    //
    // Global concurrent queues and queues bound to non-dispatch threads
    // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
    /** Barrier lock
     (Paraphrasing the comment above: merging the acquiring thread's QoS into the
     queue state would be more correct, but that is too expensive for the fast path;
     the trade-off is that an enqueue from a lower-priority thread contending with
     this fast path may receive a useless override.)
     */
    if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
        return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
                DC_FLAG_BARRIER | dc_flags);
    }

    if (unlikely(dl->do_targetq->do_targetq)) {
        return _dispatch_sync_recurse(dl, ctxt, func,
                DC_FLAG_BARRIER | dc_flags);// recurse
    }
    _dispatch_introspection_sync_begin(dl);// a series of preparatory bookkeeping
    _dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
            DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
                    dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}

_dispatch_lane_barrier_sync_invoke_and_complete():

/*
 * For queues we can cheat and inline the unlock code, which is invalid
 * for objects with a more complex state machine (sources or mach channels)
 */
DISPATCH_NOINLINE
static void
_dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq,
        void *ctxt, dispatch_function_t func DISPATCH_TRACE_ARG(void *dc))
{
    // push -> callout (run the work) -> pop, all on the current thread
    _dispatch_sync_function_invoke_inline(dq, ctxt, func);

    // the work has finished executing
    _dispatch_trace_item_complete(dc);
    if (unlikely(dq->dq_items_tail || dq->dq_width > 1)) {
        return _dispatch_lane_barrier_complete(dq, 0, 0);
    }

    // Presence of any of these bits requires more work that only
    // _dispatch_*_barrier_complete() handles properly
    //
    // Note: testing for RECEIVED_OVERRIDE or RECEIVED_SYNC_WAIT without
    // checking the role is sloppy, but is a super fast check, and neither of
    // these bits should be set if the lock was never contended/discovered.
    const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK |
            DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY |
            DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER |
            DISPATCH_QUEUE_RECEIVED_SYNC_WAIT;
    uint64_t old_state, new_state;

    // similar to _dispatch_queue_drain_try_unlock
    // release the queue state so other pending tasks can start executing
    os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
        new_state  = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
        new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
        new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
        if (unlikely(old_state & fail_unlock_mask)) {
            os_atomic_rmw_loop_give_up({
                // the barrier has completed
                return _dispatch_lane_barrier_complete(dq, 0, 0);
            });
        }
    });
    if (_dq_state_is_base_wlh(old_state)) {
        _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq);
    }
}

As shown above, for a barrier on a serial queue: the queue executes one task at a time anyway, so the tasks queued behind the barrier simply wait until the current one completes and the queue state is released, then continue in order.

2) Barrier on a concurrent queue

How does a concurrent queue implement the barrier's blocking behavior?
_dispatch_queue_try_acquire_barrier_sync()
--> _dispatch_queue_try_acquire_barrier_sync_and_suspend()

/* Used by _dispatch_barrier_{try,}sync
 *
 * Note, this fails if any of e:1 or dl!=0, but that allows this code to be a
 * simple cmpxchg which is significantly faster on Intel, and makes a
 * significant difference on the uncontended codepath.
 *
 * See discussion for DISPATCH_QUEUE_DIRTY in queue_internal.h
 *
 * Initial state must be `completely idle`
 * Final state forces { ib:1, qf:1, w:0 }
 */
DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
static inline bool
_dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_lane_t dq,
        uint32_t tid, uint64_t suspend_count)
{
    uint64_t init  = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width);
    uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER |
            _dispatch_lock_value_from_tid(tid) |
            (suspend_count * DISPATCH_QUEUE_SUSPEND_INTERVAL);
    uint64_t old_state, new_state;

    return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
        uint64_t role = old_state & DISPATCH_QUEUE_ROLE_MASK;
        if (old_state != (init | role)) {
            // give_up: the state is not completely idle, so bail out here
            // (this pairs with the release that happens once the barrier completes, see above)
            os_atomic_rmw_loop_give_up(break);
        }
        new_state = value | role;
    });
}

The os_atomic_rmw_loop2o() loop installs the "in barrier / width full / owner tid" state in one atomic read-modify-write, so the other tasks in the queue are held back until the barrier completes and the state is released.
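
The shape of that loop can be illustrated with a stripped-down "acquire only if completely idle" CAS loop written in plain C11 atomics (an illustration of the pattern, not libdispatch code; the names are mine):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Try to install the barrier bits in one shot; bail out to the slow path
// (the give_up branch above) as soon as the state is not completely idle.
static bool try_acquire_barrier(_Atomic uint64_t *state, uint64_t idle_value,
        uint64_t barrier_bits) {
    uint64_t old_state = atomic_load_explicit(state, memory_order_relaxed);
    do {
        if (old_state != idle_value) {
            return false;              // queue busy: give up, use the slow path
        }
    } while (!atomic_compare_exchange_weak_explicit(state, &old_state,
            idle_value | barrier_bits, memory_order_acquire, memory_order_relaxed));
    return true;                       // barrier bits installed atomically
}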

2.2.2. Deadlock

Take the classic main-thread deadlock as an example and trace the cause through the source:
when a task is dispatched synchronously onto the serial queue the caller is already running on (for example, dispatch_sync onto the main queue from the main thread), the call ends up in
_dispatch_sync_f_slow()

#pragma mark -
#pragma mark dispatch_sync / dispatch_barrier_sync

DISPATCH_NOINLINE
static void
_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
        dispatch_function_t func, uintptr_t top_dc_flags,
        dispatch_queue_class_t dqu, uintptr_t dc_flags)
{
    dispatch_queue_t top_dq = top_dqu._dq;
    dispatch_queue_t dq = dqu._dq;
    if (unlikely(!dq->do_targetq)) {
        return _dispatch_sync_function_invoke(dq, ctxt, func);
    }

    pthread_priority_t pp = _dispatch_get_priority();
    struct dispatch_sync_context_s dsc = {
        .dc_flags    = DC_FLAG_SYNC_WAITER | dc_flags,
        .dc_func     = _dispatch_async_and_wait_invoke,
        .dc_ctxt     = &dsc,
        .dc_other    = top_dq,
        .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
        .dc_voucher  = _voucher_get(),
        .dsc_func    = func,
        .dsc_ctxt    = ctxt,
        .dsc_waiter  = _dispatch_tid_self(),
    };

    // push the task onto the queue
    _dispatch_trace_item_push(top_dq, &dsc);
    // wait on the target queue
    __DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);
    /**
     DISPATCH_NOINLINE
     static void
     __DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq)
     {
         uint64_t dq_state = _dispatch_wait_prepare(dq);// prepare dq for waiting
         if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) {
            // the system raises a crash here
             DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
                     "dispatch_sync called on queue "
                     "already owned by current thread");
         }
        ... more code ...
     }
     
     // _dq_state_drain_locked_by() --> _dispatch_lock_is_locked_by()
     DISPATCH_ALWAYS_INLINE
     static inline bool
     _dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
     {
         // equivalent to _dispatch_lock_owner(lock_value) == tid
        // the tid holding the queue's drain lock equals the waiter's tid: same thread --> deadlock
         return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
     }
     
     */

    if (dsc.dsc_func == NULL) {
        // dsc_func being cleared means that the block ran on another thread ie.
        // case (2) as listed in _dispatch_async_and_wait_f_slow.
        dispatch_queue_t stop_dq = dsc.dc_other;
        return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags);
    }

    _dispatch_introspection_sync_begin(top_dq);
    _dispatch_trace_item_pop(top_dq, &dsc);
    _dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags
            DISPATCH_TRACE_ARG(&dsc));
}

* If the thread that already owns (is draining) the queue and the thread about to wait on it share the same tid, the check above fires == crash == deadlock.
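
A minimal reproduction (assumed test code, not from the original post): calling dispatch_sync on the serial queue the caller is already draining, for example the main queue from the main thread, makes the waiter's tid equal the owner's tid and trips the DISPATCH_CLIENT_CRASH above.

// Deadlock demo: dispatch_sync onto the queue we are currently executing on.
// Run from the main thread, this crashes with
// "dispatch_sync called on queue already owned by current thread".
- (void)my_deadlock {
    dispatch_sync(dispatch_get_main_queue(), ^{
        NSLog(@"never reached");
    });
}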

3. Semaphores

Sample code:

// semaphore
- (void)my_dispatch_semaphore {
    
    dispatch_queue_t queue01 = dispatch_get_global_queue(0, 0);

    // 2: allows at most 2 tasks to run at the same time
    dispatch_semaphore_t sem = dispatch_semaphore_create(2);
    /**
     Task 02 started
     Task 01 started
     Task 02 finished
     Task 01 finished
     Task 03 started
     Task 03 finished
     */
    
    dispatch_async(queue01, ^{
        
        dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);
        NSLog(@"開始任務01");
        sleep(1);
        NSLog(@"任務01完成");
        dispatch_semaphore_signal(sem);
    });
    
    dispatch_async(queue01, ^{
        
        dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);
        NSLog(@"開始任務02");
        sleep(1);
        NSLog(@"任務02完成");
        dispatch_semaphore_signal(sem);
    });
    
    dispatch_async(queue01, ^{
        
        dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);
        NSLog(@"開始任務03");
        sleep(1);
        NSLog(@"任務03完成");
        dispatch_semaphore_signal(sem);
    });
}
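
Besides throttling concurrency, a semaphore created with a value of 0 is often used to block the current thread until an asynchronous callback fires. A hedged sketch (fetchDataWithCompletion: is a hypothetical async API used only for illustration):

// Turning an async callback into a synchronous wait with a 0-valued semaphore.
- (NSData *)my_semaphore_bridge {
    __block NSData *result = nil;
    dispatch_semaphore_t sem = dispatch_semaphore_create(0);

    [self fetchDataWithCompletion:^(NSData *data) {
        result = data;
        dispatch_semaphore_signal(sem);   // +1: wakes the waiter below if it is blocked
    }];

    // -1: blocks here while the value would go negative, i.e. until signal is called
    dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);
    return result;
}

Blocking the main thread this way is usually a bad idea; the pattern is meant for background threads or tests.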

3.1. dispatch_semaphore_signal() --> increments the semaphore value

long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
    long value = os_atomic_inc2o(dsema, dsema_value, release);
    if (likely(value > 0)) {
        return 0;
    }
    if (unlikely(value == LONG_MIN)) {
        DISPATCH_CLIENT_CRASH(value,
                "Unbalanced call to dispatch_semaphore_signal()");
    }
    return _dispatch_semaphore_signal_slow(dsema);
}

Expanding the macro chain for (dsema, dsema_value, release):
os_atomic_inc2o(p, f, m)
--> os_atomic_add2o(p, f, 1, m)
--> os_atomic_add(&(p)->f, (v), m) : os_atomic_add(&(dsema->dsema_value), (1), m)
--> _os_atomic_c11_op((p), (v), m, add, +) : _os_atomic_c11_op(dsema->dsema_value, 1, m, add, +)
--> atomic_fetch_add_explicit(&dsema->dsema_value, 1, memory_order_release)

({ _os_atomic_basetypeof(p) _v = (v), _r = \
        atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \
        memory_order_##m); (__typeof__(_r))(_r op _v); })

So atomic_fetch_add_explicit() performs an atomic +1 on dsema->dsema_value: dispatch_semaphore_signal() simply increments the semaphore value atomically.
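
A standalone C11 sketch of what that expansion boils down to (plain atomics, not the libdispatch types; note that the fetch call returns the original value, and the macro then applies "+ 1" to produce the value that dispatch_semaphore_signal() checks):

#include <stdatomic.h>
#include <stdio.h>

int main(void) {
    _Atomic long dsema_value = 0;

    // dispatch_semaphore_signal: atomic +1 on dsema_value
    long orig = atomic_fetch_add_explicit(&dsema_value, 1, memory_order_release);
    long value = orig + 1;                    // what os_atomic_inc2o evaluates to
    printf("signal: value = %ld\n", value);   // 1

    // dispatch_semaphore_wait does the mirror image: an atomic -1
    orig = atomic_fetch_sub_explicit(&dsema_value, 1, memory_order_acquire);
    value = orig - 1;
    printf("wait:   value = %ld\n", value);   // 0
    return 0;
}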

3.2. dispatch_semaphore_wait() --> decrements the semaphore value

long
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
    long value = os_atomic_dec2o(dsema, dsema_value, acquire);
    if (likely(value >= 0)) {// e.g. 1 - 1 = 0, so 0 is still a valid value (no need to wait)
        return 0;
    }
    return _dispatch_semaphore_wait_slow(dsema, timeout);
}

os_atomic_dec2o() expands the same way as os_atomic_inc2o(), but ends in
atomic_fetch_sub_explicit():
an atomic -1 on dsema->dsema_value. If the value goes negative, _dispatch_semaphore_wait_slow() blocks the caller until a matching signal (or the timeout) arrives.

4. Dispatch groups

4.1. Sample code:

// dispatch group
- (void)my_dispatch_group {
    
   dispatch_queue_t my_queue = dispatch_queue_create("my_queue001", DISPATCH_QUEUE_CONCURRENT);
    
    dispatch_group_t group = dispatch_group_create();
    
    dispatch_group_enter(group);
    dispatch_async(dispatch_get_global_queue(0, 0), ^{
        
        NSLog(@"開始任務01");
        sleep(1);
        
        dispatch_async(my_queue, ^{
            NSLog(@"任務01里面的異步耗時任務開始");
            sleep(3);
            NSLog(@"任務01里面的異步耗時任務完成");
        });
        
        NSLog(@"任務01完成");
        
        dispatch_group_leave(group);
    });
    
    dispatch_group_enter(group);
    dispatch_async(dispatch_get_global_queue(0, 0), ^{
        
        NSLog(@"開始任務02");
        sleep(1);
        NSLog(@"任務02完成");
        
        dispatch_group_leave(group);
    });
    
    dispatch_group_notify(group, dispatch_get_main_queue(), ^{
        NSLog(@"group 里的任務完成了");
    });
    
    NSLog(@"-- 我是主線程的任務 --");
}

/** Output:
    Task 01 started
     -- main thread task --
    Task 02 started
    Task 02 finished
    Task 01 finished
    Async time-consuming task inside task 01 started
    All tasks in the group have finished
    Async time-consuming task inside task 01 finished
    */
4.1.1. A hypothetical requirement (demo)

Requirement: tasks 01, 02, and 03 (the nested async task inside task 01 counts as task 03) may run in any order, but the main thread must be notified only after all of them have finished.
From the output above we can see that task 03 had not finished yet when dispatch_group_notify() already fired, so the requirement is clearly not met.
Solution: modify the code as follows so that the nested async task 03 inside task 01 also joins the group, then run again:

// Solution 1
dispatch_group_enter(group);
dispatch_async(my_queue, ^{
    NSLog(@"Async time-consuming task 03 inside task 01 started");
    sleep(3);
    NSLog(@"Async time-consuming task 03 inside task 01 finished");
    dispatch_group_leave(group);
});

// Solution 2
dispatch_group_async(group, my_queue, ^{
    NSLog(@"Async time-consuming task 03 inside task 01 started");
    sleep(3);
    NSLog(@"Async time-consuming task 03 inside task 01 finished");
});

/** Both solutions produce the same output:
    -- main thread task --
    Task 01 started
    Task 02 started
    Task 01 finished
    Task 02 finished
    Async time-consuming task 03 inside task 01 started
    Async time-consuming task 03 inside task 01 finished
    All tasks in the group have finished
    */

This looks like it satisfies the requirement, but is it really bullet-proof?

4.1.2. Using the code above, let's test the dispatch group under several scenarios:

1) Modify the code as follows and run:

// 1. enter/leave unbalanced, more enters than leaves
dispatch_group_notify(group, dispatch_get_main_queue(), ^{
        NSLog(@"All tasks in the group have finished");// waits forever, the notify block never runs
    });
// 2. enter/leave unbalanced, fewer enters than leaves
The program crashes at runtime.

2) Modify the code again and run:

// Move `dispatch_group_notify` ahead of all the tasks' `enter` calls
// 1. enter/leave still paired
dispatch_group_notify(group, dispatch_get_main_queue(), ^{
        NSLog(@"All tasks in the group have finished");
    });
// 2. comment out every leave, keeping only the enters
dispatch_group_enter(group);
    dispatch_async(dispatch_get_global_queue(0, 0), ^{
..... same as before ......
});

/** Cases 1 and 2 produce the same output:
     -- main thread task --
    Task 02 started
    Task 01 started
    All tasks in the group have finished
    Task 02 finished
    Task 01 finished
    Async time-consuming task 03 inside task 01 started
    Async time-consuming task 03 inside task 01 finished
     */

Question 01: why does the group_notify callback now seem unrelated to group_enter / group_leave?

§ Dispatch group usage summary:

  • dispatch_group_notify fires once the enter and leave calls registered so far have balanced out.
  • The dispatch_group_notify callback can therefore fire as soon as group_enter and group_leave are balanced; this does not necessarily mean every task in the group has finished.
  • When group_enter and group_leave are not paired:
    1. with more enters than leaves, the dispatch_group_notify callback never runs;
    2. with more leaves than enters, the program crashes. The crash location can vary: a leave only needs some matching enter, which is not necessarily the one written next to it in the code.
  A blocking alternative to notify, dispatch_group_wait, is sketched below.
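
Here is the sketch mentioned above: dispatch_group_wait blocks the calling thread until the group's enters and leaves balance out, or until a timeout elapses (illustrative code, not from the original post):

// Blocking until every enter is balanced by a leave, with a 3 second timeout.
- (void)my_group_wait {
    dispatch_group_t group = dispatch_group_create();

    dispatch_group_enter(group);
    dispatch_async(dispatch_get_global_queue(0, 0), ^{
        sleep(1);
        dispatch_group_leave(group);   // balances the enter above
    });

    // Returns 0 when the group emptied in time, non-zero on timeout.
    long result = dispatch_group_wait(group,
            dispatch_time(DISPATCH_TIME_NOW, (int64_t)(3 * NSEC_PER_SEC)));
    NSLog(@"group %@", result == 0 ? @"finished" : @"timed out");
}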

4.2. How dispatch groups work

Question 02: why does dispatch_group_async() behave the same as a group_enter / group_leave pair?
Let's analyze the dispatch group internals with these two questions in mind.

4.2.1. Creating a group

dispatch_group_create(): allocates the group object

dispatch_group_t
dispatch_group_create(void)
{
    return _dispatch_group_create_with_count(0);
}

// _dispatch_group_create_with_count()
DISPATCH_ALWAYS_INLINE
static inline dispatch_group_t
_dispatch_group_create_with_count(uint32_t n)
{
    dispatch_group_t dg = _dispatch_object_alloc(DISPATCH_VTABLE(group),
            sizeof(struct dispatch_group_s));
    dg->do_next = DISPATCH_OBJECT_LISTLESS;
    dg->do_targetq = _dispatch_get_default_queue(false);
    if (n) {// dispatch_group_create() passes n = 0, so this branch is not taken
        os_atomic_store2o(dg, dg_bits,
                (uint32_t)-n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed);
        os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // <rdar://22318411>
    }
    return dg;
}
4.2.2. dispatch_group_enter()
void
dispatch_group_enter(dispatch_group_t dg)
{
    // The value is decremented on a 32bits wide atomic so that the carry
    // for the 0 -> -1 transition is not propagated to the upper 32bits.
    // os_atomic_sub_orig2o: an atomic subtraction on dg->dg_bits --> similar to a semaphore
    uint32_t old_bits = os_atomic_sub_orig2o(dg, dg_bits,
            DISPATCH_GROUP_VALUE_INTERVAL, acquire);
    uint32_t old_value = old_bits & DISPATCH_GROUP_VALUE_MASK;
    // the first enter makes the 0 -> -1 transition; old_value == 0 means this was that transition, so retain the group
    if (unlikely(old_value == 0)) {
        _dispatch_retain(dg); // <rdar://problem/22318411>
    }
    // DISPATCH_GROUP_VALUE_MAX : 0x0000000000000004ULL
    // if old_value has wrapped around to DISPATCH_GROUP_VALUE_MAX, too many enters were nested --> the crash below fires
    if (unlikely(old_value == DISPATCH_GROUP_VALUE_MAX)) {
        DISPATCH_CLIENT_CRASH(old_bits,
                "Too many nested calls to dispatch_group_enter()");
    }
}

How does dispatch_group_enter() hold back dispatch_group_notify()? --> the answer is in the dispatch_group_notify() source below.

4.2.3. dispatch_group_notify()
#ifdef __BLOCKS__
void
dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
        dispatch_block_t db)
{
    dispatch_continuation_t dsn = _dispatch_continuation_alloc();
    _dispatch_continuation_init(dsn, dq, db, 0, DC_FLAG_CONSUME);
    _dispatch_group_notify(dg, dq, dsn);
}
#endif

_dispatch_group_notify():

// _dispatch_group_notify():
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
        dispatch_continuation_t dsn)
{
    uint64_t old_state, new_state;
    dispatch_continuation_t prev;

    dsn->dc_data = dq;
    _dispatch_retain(dq);

    /**
     #define os_mpsc_push_update_tail(Q, tail, _o_next)  ({ \
         os_mpsc_node_type(Q) _tl = (tail); \
         os_atomic_store2o(_tl, _o_next, NULL, relaxed); \
         os_atomic_xchg(_os_mpsc_tail Q, _tl, release); \
     })
     */
    // dg -> os state
    prev = os_mpsc_push_update_tail(os_mpsc(dg, dg_notify), dsn, do_next);
    if (os_mpsc_push_was_empty(prev)) _dispatch_retain(dg);
    os_mpsc_push_update_prev(os_mpsc(dg, dg_notify), prev, dsn, do_next);
    if (os_mpsc_push_was_empty(prev)) {
        os_atomic_rmw_loop2o(dg, dg_state, old_state, new_state, release, {
            new_state = old_state | DISPATCH_GROUP_HAS_NOTIFS;
            if ((uint32_t)old_state == 0) {
                // old_state is back to 0 (all enters balanced), so proceed
                os_atomic_rmw_loop_give_up({
                    // wake up: run the registered notify callbacks
                    return _dispatch_group_wake(dg, new_state, false);
                });
            }
        });
    }
}


// _dispatch_group_wake():
DISPATCH_NOINLINE
static void
_dispatch_group_wake(dispatch_group_t dg, uint64_t dg_state, bool needs_release)
{
    // needs_release = false --> refs = 0
    uint16_t refs = needs_release ? 1 : 0; // <rdar://problem/22318411>
    // dg_state = 2 --> 2 & 0x0000000000000002ULL = 2
    if (dg_state & DISPATCH_GROUP_HAS_NOTIFS) {
        dispatch_continuation_t dc, next_dc, tail;

        // Snapshot before anything is notified/woken <rdar://problem/8554546>
        dc = os_mpsc_capture_snapshot(os_mpsc(dg, dg_notify), &tail);
        do {
            dispatch_queue_t dsn_queue = (dispatch_queue_t)dc->dc_data;
            next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next);
            /** From here on the flow is very similar to a plain async dispatch: a push
             DISPATCH_ALWAYS_INLINE
             static inline void
             _dispatch_continuation_async(dispatch_queue_class_t dqu,
                     dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags)
             {
             #if DISPATCH_INTROSPECTION
                 if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
                     _dispatch_trace_item_push(dqu, dc);
                 }
             #else
                 (void)dc_flags;
             #endif
                 return dx_push(dqu._dq, dc, qos);
             }
             */
            _dispatch_continuation_async(dsn_queue, dc,
                    _dispatch_qos_from_pp(dc->dc_priority), dc->dc_flags);
            _dispatch_release(dsn_queue);
        } while ((dc = next_dc));

        refs++;
    }

    if (dg_state & DISPATCH_GROUP_HAS_WAITERS) {
        _dispatch_wake_by_address(&dg->dg_gen);
    }

    if (refs) _dispatch_release_n(dg, refs);
}

From the source and the scenarios above: dispatch_group_enter() moves old_bits from 0 to -1.
From the dispatch_group_notify() source, _dispatch_group_wake() only runs when old_state == 0; so without a matching dispatch_group_leave(), the state stays at -1 and the wake never happens.

4.2.4. dispatch_group_leave()
void
dispatch_group_leave(dispatch_group_t dg)
{
    // The value is incremented on a 64bits wide atomic so that the carry for
    // the -1 -> 0 transition increments the generation atomically.
    // os_atomic_add_orig2o: an atomic addition (it returns the value before the add)
    uint64_t new_state, old_state = os_atomic_add_orig2o(dg, dg_state,
            DISPATCH_GROUP_VALUE_INTERVAL, release);
    // old_state is the value *before* the add; with exactly one outstanding
    // enter, masking it yields DISPATCH_GROUP_VALUE_1 (see the branch below)
    uint32_t old_value = (uint32_t)(old_state & DISPATCH_GROUP_VALUE_MASK);
    
    // DISPATCH_GROUP_VALUE_1  0x00000000fffffffcULL
    if (unlikely(old_value == DISPATCH_GROUP_VALUE_1)) {
        old_state += DISPATCH_GROUP_VALUE_INTERVAL;
        do {
            new_state = old_state;
            if ((old_state & DISPATCH_GROUP_VALUE_MASK) == 0) {
                new_state &= ~DISPATCH_GROUP_HAS_WAITERS;
                new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
            } else {
                // If the group was entered again since the atomic_add above,
                // we can't clear the waiters bit anymore as we don't know for
                // which generation the waiters are for
                new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
            }
            if (old_state == new_state) break;
        } while (unlikely(!os_atomic_cmpxchgv2o(dg, dg_state,
                old_state, new_state, &old_state, relaxed)));
        // wake the group: run the notify callbacks
        return _dispatch_group_wake(dg, old_state, true);
    }

    // unbalanced: old_value == 0 means there was no outstanding enter for this leave --> crash
    if (unlikely(old_value == 0)) {
        DISPATCH_CLIENT_CRASH((uintptr_t)old_value,
                "Unbalanced call to dispatch_group_leave()");
    }
}

dispatch_group_leave() can therefore also trigger _dispatch_group_wake().

This answers Question 01: the wake inside dispatch_group_notify() is gated on old_state == 0. When dispatch_group_notify() is registered before any enter, the state is still 0 (no enter has pushed it to -1), so the wake path is taken right away.
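
The enter/leave arithmetic can be checked with a tiny standalone program using the group constants (values taken from the libdispatch headers; this is an illustration of the 32-bit wrap-around, not library code):

#include <stdint.h>
#include <stdio.h>

#define DISPATCH_GROUP_VALUE_MASK     0x00000000fffffffcULL
#define DISPATCH_GROUP_VALUE_INTERVAL 0x0000000000000004ULL
#define DISPATCH_GROUP_VALUE_1        DISPATCH_GROUP_VALUE_MASK

int main(void) {
    uint32_t bits = 0;                                 // freshly created group

    bits -= DISPATCH_GROUP_VALUE_INTERVAL;             // dispatch_group_enter
    printf("after enter: 0x%08x\n", bits);             // 0xfffffffc, the -1 state

    // dispatch_group_leave reads the value *before* adding the interval back
    uint32_t old_value = bits & (uint32_t)DISPATCH_GROUP_VALUE_MASK;
    printf("last leave? %d\n",
            old_value == (uint32_t)DISPATCH_GROUP_VALUE_1);  // 1: wake the group

    bits += DISPATCH_GROUP_VALUE_INTERVAL;             // dispatch_group_leave
    printf("after leave: 0x%08x\n", bits);             // back to 0: notify can fire
    return 0;
}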

4.2.5. dispatch_group_async()

As for Question 02: does dispatch_group_async() wrap dispatch_group_enter() / dispatch_group_leave() internally?

#ifdef __BLOCKS__
void
dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
        dispatch_block_t db)
{
    dispatch_continuation_t dc = _dispatch_continuation_alloc();
    uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_GROUP_ASYNC;
    dispatch_qos_t qos;

    qos = _dispatch_continuation_init(dc, dq, db, 0, dc_flags);
    _dispatch_continuation_group_async(dg, dq, dc, qos);
}
#endif

// _dispatch_continuation_group_async():
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_group_async(dispatch_group_t dg, dispatch_queue_t dq,
        dispatch_continuation_t dc, dispatch_qos_t qos)
{
    dispatch_group_enter(dg);// enters the group first
    dc->dc_data = dg;
    // then dispatches the continuation, the same flow as a plain async call
    _dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}

From the source above, dispatch_group_async() calls dispatch_group_enter(dg) internally. But where is dispatch_group_leave()? --> Presumably it is called implicitly once the block has finished executing.

A global search for _dispatch_client_callout turns up:

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_continuation_with_group_invoke(dispatch_continuation_t dc)
{
    struct dispatch_object_s *dou = dc->dc_data;
    unsigned long type = dx_type(dou);
    if (type == DISPATCH_GROUP_TYPE) {// is this a dispatch group?
        _dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
        _dispatch_trace_item_complete(dc);
        dispatch_group_leave((dispatch_group_t)dou);// dispatch_group_leave
    } else {
        DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type");
    }
}

This confirms it: for a dispatch group, once _dispatch_client_callout --> _dispatch_trace_item_complete has finished, dispatch_group_leave() is called automatically.

That's all.
