JobSchedulerService

1 Member variables

    /** Master list of jobs. */
    final JobStore mJobs;  //job list
    /** Tracking the standby bucket state of each app */
    final StandbyTracker mStandbyTracker;
    /**
     * Track Services that have currently active or pending jobs. The index is provided by
     * {@link JobStatus#getServiceToken()}
     */
    final List<JobServiceContext> mActiveServices = new ArrayList<>();
    /** List of controllers that will notify this service of updates to jobs. */
    final List<StateController> mControllers;
    /**
     * Queue of pending jobs. The JobServiceContext class will receive jobs from this list
     * when ready to execute them.
     */
    final ArrayList<JobStatus> mPendingJobs = new ArrayList<>();
    /** Need directly for receiving thermal events */
    private IThermalService mThermalService;

    final JobHandler mHandler;
    /**
     * Whether to use heartbeats or rolling window for quota management. True will use
     * heartbeats, false will use a rolling window.
     */
    public boolean USE_HEARTBEATS = DEFAULT_USE_HEARTBEATS; // defaults to false

    // Max job counts for screen on / off, for each memory trim level.
    final MaxJobCountsPerMemoryTrimLevel MAX_JOB_COUNTS_SCREEN_ON =
                new MaxJobCountsPerMemoryTrimLevel(
                        new MaxJobCounts(
                                8, "max_job_total_on_normal",
                                6, "max_job_max_bg_on_normal",
                                2, "max_job_min_bg_on_normal"),
                        new MaxJobCounts(
                                8, "max_job_total_on_moderate",
                                4, "max_job_max_bg_on_moderate",
                                2, "max_job_min_bg_on_moderate"),
                        new MaxJobCounts(
                                5, "max_job_total_on_low",
                                1, "max_job_max_bg_on_low",
                                1, "max_job_min_bg_on_low"),
                        new MaxJobCounts(
                                5, "max_job_total_on_critical",
                                1, "max_job_max_bg_on_critical",
                                1, "max_job_min_bg_on_critical"));

    final MaxJobCountsPerMemoryTrimLevel MAX_JOB_COUNTS_SCREEN_OFF =
                new MaxJobCountsPerMemoryTrimLevel(
                        new MaxJobCounts(
                                10, "max_job_total_off_normal",
                                6, "max_job_max_bg_off_normal",
                                2, "max_job_min_bg_off_normal"),
                        new MaxJobCounts(
                                10, "max_job_total_off_moderate",
                                4, "max_job_max_bg_off_moderate",
                                2, "max_job_min_bg_off_moderate"),
                        new MaxJobCounts(
                                5, "max_job_total_off_low",
                                1, "max_job_max_bg_off_low",
                                1, "max_job_min_bg_off_low"),
                        new MaxJobCounts(
                                5, "max_job_total_off_critical",
                                1, "max_job_max_bg_off_critical",
                                1, "max_job_min_bg_off_critical"));

2 mJobs = JobStore.initAndGet(this);

// Reads the persisted job entries from /data/system/job/jobs.xml, e.g.:

<job jobid="111007999" package="com.google.android.apps.walletnfcrel" class="com.google.android.libraries.notifications.entrypoints.scheduled.ScheduledTaskService" sourcePackageName="com.google.android.apps.walletnfcrel" sourceUserId="0" uid="10089" priority="0" flags="0" lastSuccessfulRunTime="1594621477078" lastFailedRunTime="1593691857483">
    <constraints net-capabilities="94208" net-unwanted-capabilities="0" net-transport-types="0" />
    <periodic period="86400000" flex="86400000" deadline="1596011419776" delay="1596011419776" />
    <extras>
        <string name="com.google.android.libraries.notifications.INTENT_EXTRA_TASK_HANDLER">PERIODIC_TASK</string>
    </extras>
</job>

<job jobid="3" package="com.google.android.apps.turbo" class="com.google.android.apps.turbo.nudges.battery.LoggingPermissionsJobService" sourcePackageName="com.google.android.apps.turbo" sourceUserId="0" uid="10115" priority="0" flags="0" lastSuccessfulRunTime="1596024208322" lastFailedRunTime="0">
    <constraints idle="true" charging="true" />
    <periodic period="86400000" flex="86400000" deadline="1596184219771" delay="1596097819771" />
    <extras />
</job>

 <job jobid="108" package="com.google.android.apps.turbo" class="com.google.android.libraries.smartbattery.brightness.library.UpdateOfflineModelJob" sourcePackageName="com.google.android.apps.turbo" sourceUserId="0" uid="10115" priority="0" flags="0" lastSuccessfulRunTime="0" lastFailedRunTime="0">
    <constraints charging="true" />
    <one-off delay="1596541727614" />
    <extras />
</job>

constraints: the conditions that must all be satisfied before the job may run
periodic: the job's repeat interval (period/flex, in milliseconds)
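
For reference, a persisted entry like the second record above is produced by an app calling the public JobScheduler API. A minimal sketch (MyJobService is an illustrative stand-in for the app's own JobService subclass):

    import android.app.job.JobInfo;
    import android.app.job.JobScheduler;
    import android.content.ComponentName;
    import android.content.Context;

    // Schedules a periodic, persisted job matching the second XML entry above:
    // requires charging + device idle, repeats once a day (86400000 ms), and is
    // written to jobs.xml so it survives reboot. setPersisted(true) requires the
    // RECEIVE_BOOT_COMPLETED permission.
    void scheduleDailyJob(Context context) {
        ComponentName service = new ComponentName(context, MyJobService.class);
        JobInfo job = new JobInfo.Builder(/* jobId */ 3, service)
                .setRequiresCharging(true)
                .setRequiresDeviceIdle(true)
                .setPeriodic(24 * 60 * 60 * 1000L) // flex defaults to the full period
                .setPersisted(true)
                .build();
        context.getSystemService(JobScheduler.class).schedule(job);
    }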

3 schedule

-> scheduleAsPackage
        -> check isAppStartModeDisabled to confirm the app is allowed to run in the background
        -> create a JobStatus from the JobInfo
        -> startTrackingJobLocked
            // If the job is immediately ready to run, then we can just immediately
            // put it in the pending list and try to schedule it.  This is especially
            // important for jobs with a 0 deadline constraint, since they will happen a fair
            // amount, we want to handle them as quickly as possible, and semantically we want to
            // make sure we have started holding the wake lock for the job before returning to
            // the caller.
            // If the job is not yet ready to run, there is nothing more to do -- we are
            // now just waiting for one of its controllers to change state and schedule
            // the job appropriately.
        -> isReadyToBeExecutedLocked (the resulting branch is sketched below)
            -> maybeRunPendingJobsLocked      (job is ready now)
         or -> evaluateControllerStatesLocked (not ready yet)
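
Condensed, that final branch of the schedule flow looks roughly like this (paraphrased from the Android 10 sources, not verbatim):

    if (isReadyToBeExecutedLocked(jobStatus)) {
        // All constraints are already satisfied: queue the job and try to run it
        // right away, taking the wakelock before returning to the caller.
        mJobPackageTracker.notePending(jobStatus);
        addOrderedItem(mPendingJobs, jobStatus, mEnqueueTimeComparator);
        maybeRunPendingJobsLocked();
    } else {
        // Not ready yet: the relevant StateControllers will re-evaluate the job
        // when the state they track (network, charging, idle, ...) changes.
        evaluateControllerStatesLocked(jobStatus);
    }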

3.1 Deciding whether a job can be executed

    /**
     * Criteria for moving a job into the pending queue:
     *      - It's ready.
     *      - It's not pending.
     *      - It's not already running on a JSC.
     *      - The device isn't thermally throttled (when it is, jobs that request the
     *        network and run below foreground priority are held back).
     *      - The user that requested the job is running.
     *      - The job's standby bucket has come due to be runnable.
     *      - The component is enabled and runnable.
     */
    private boolean isReadyToBeExecutedLocked(JobStatus job)
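
A condensed paraphrase of those checks (helper names are simplified from the Android 10 sources; componentPresentLocked is a stand-in for the inline ServiceInfo lookup in the real method):

    private boolean isReadyToBeExecutedLocked(JobStatus job) {
        if (!job.isReady()) return false;                     // constraints not all satisfied
        if (mPendingJobs.contains(job)) return false;         // already in the pending queue
        if (isCurrentlyActiveLocked(job)) return false;       // already running on a JSC
        if (isJobThermalConstrainedLocked(job)) return false; // device too hot for this job
        if (!areUsersStartedLocked(job)) return false;        // requesting user not running
        return componentPresentLocked(job);                   // stand-in: component present and enabled
    }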

3.2 Deciding whether to actually execute it

    /**
     * Reconcile jobs in the pending queue against available execution contexts.
     * A controller can force a job into the pending queue even if it's already running, but
     * here is where we decide whether to actually execute it.
     */
    void maybeRunPendingJobsLocked() {
        /*
         * assignJobsToContextsLocked (implemented in JobConcurrencyManager) takes
         * jobs from the pending queue and runs them on available contexts. If no
         * contexts are available, it preempts lower priority jobs to run higher
         * priority ones. Lock on mJobs before calling this function.
         */
        mConcurrencyManager.assignJobsToContextsLocked(); // core logic
    }

3.3 assignJobsToContextsLocked

private void assignJobsToContextsInternalLocked() {
        if (DEBUG) {
            Slog.d(TAG, printPendingQueueLocked());
        }

        final JobPackageTracker tracker = mService.mJobPackageTracker;
        final List<JobStatus> pendingJobs = mService.mPendingJobs; // the pending jobs
        final List<JobServiceContext> activeServices = mService.mActiveServices; // the active JobServiceContexts
        final List<StateController> controllers = mService.mControllers; // the state controllers

        updateMaxCountsLocked(); // refresh the max job counts (see section 1)

        // To avoid GC churn, we recycle the arrays.
        JobStatus[] contextIdToJobMap = mRecycledAssignContextIdToJobMap; // indexed in parallel with mActiveServices
        boolean[] slotChanged = mRecycledSlotChanged;
        int[] preferredUidForContext = mRecycledPreferredUidForContext;


        // Initialize the work variables and also count running jobs.
        mJobCountTracker.reset(
                mMaxJobCounts.getMaxTotal(),
                mMaxJobCounts.getMaxBg(),
                mMaxJobCounts.getMinBg());

        for (int i=0; i<MAX_JOB_CONTEXTS_COUNT; i++) {
            final JobServiceContext js = mService.mActiveServices.get(i);
            final JobStatus status = js.getRunningJobLocked(); // the currently running JobStatus, or null

            if ((contextIdToJobMap[i] = status) != null) { // store status (even if null) in the map; count only non-null
                mJobCountTracker.incrementRunningJobCount(isFgJob(status));
            }

            slotChanged[i] = false;
            preferredUidForContext[i] = js.getPreferredUid();
        }
        if (DEBUG) {
            Slog.d(TAG, printContextIdToJobMap(contextIdToJobMap, "running jobs initial"));
        }

        // Next, update the job priorities, and also count the pending FG / BG jobs.
        for (int i = 0; i < pendingJobs.size(); i++) {
            final JobStatus pending = pendingJobs.get(i);

            // If job is already running, go to next job.
            int jobRunningContext = findJobContextIdFromMap(pending, contextIdToJobMap);
            if (jobRunningContext != -1) {
                continue;
            }

            final int priority = mService.evaluateJobPriorityLocked(pending); // evaluate the priority of each pending job that isn't running yet
            pending.lastEvaluatedPriority = priority; // cache it for the assignment pass below

            mJobCountTracker.incrementPendingJobCount(isFgJob(pending));
        }

        mJobCountTracker.onCountDone();

        for (int i = 0; i < pendingJobs.size(); i++) {
            final JobStatus nextPending = pendingJobs.get(i); // the next pending job

            // Unfortunately we need to repeat this relatively expensive check.
            int jobRunningContext = findJobContextIdFromMap(nextPending, contextIdToJobMap);
            if (jobRunningContext != -1) {
                continue;
            }

            final boolean isPendingFg = isFgJob(nextPending); // foreground job? (priority >= PRIORITY_TOP_APP)

            // Find an available slot for nextPending. The context should be available OR
            // it should have lowest priority among all running jobs
            // (sharing the same Uid as nextPending)
            int minPriorityForPreemption = Integer.MAX_VALUE;
            int selectedContextId = -1;
            boolean startingJob = false;
            for (int j=0; j<MAX_JOB_CONTEXTS_COUNT; j++) {
                JobStatus job = contextIdToJobMap[j]; // the JobStatus running on this context
                int preferredUid = preferredUidForContext[j]; // the context's preferred UID
                if (job == null) { // no job currently running on this JobServiceContext
                    final boolean preferredUidOkay = (preferredUid == nextPending.getUid())
                            || (preferredUid == JobServiceContext.NO_PREFERRED_UID);

                    if (preferredUidOkay && mJobCountTracker.canJobStart(isPendingFg)) {
                        // This slot is free, and we haven't yet hit the limit on
                        // concurrent jobs...  we can just throw the job in to here.
                        selectedContextId = j;
                        startingJob = true;
                        break;
                    }
                    // No job on this context, but nextPending can't run here because
                    // the context has a preferred Uid or we have reached the limit on
                    // concurrent jobs.
                    continue;
                }
                if (job.getUid() != nextPending.getUid()) {
                    continue;
                }

                final int jobPriority = mService.evaluateJobPriorityLocked(job); // the running job's priority
                if (jobPriority >= nextPending.lastEvaluatedPriority) {
                    continue;
                }

                // TODO lastEvaluatedPriority should be evaluateJobPriorityLocked. (double check it)
                if (minPriorityForPreemption > nextPending.lastEvaluatedPriority) {
                    minPriorityForPreemption = nextPending.lastEvaluatedPriority;
                    selectedContextId = j;
                    // In this case, we're just going to preempt a low priority job, we're not
                    // actually starting a job, so don't set startingJob.
                }
            }
            if (selectedContextId != -1) {
                contextIdToJobMap[selectedContextId] = nextPending;
                slotChanged[selectedContextId] = true;
            }
            if (startingJob) {
                // Increase the counters when we're going to start a job.
                mJobCountTracker.onStartingNewJob(isPendingFg);
            }
        }
        if (DEBUG) {
            Slog.d(TAG, printContextIdToJobMap(contextIdToJobMap, "running jobs final"));
        }

        mJobCountTracker.logStatus();

        tracker.noteConcurrency(mJobCountTracker.getTotalRunningJobCountToNote(),
                mJobCountTracker.getFgRunningJobCountToNote());

        for (int i=0; i<MAX_JOB_CONTEXTS_COUNT; i++) { // walk the contexts again to apply the new assignment
            boolean preservePreferredUid = false;
            if (slotChanged[i]) { // this slot's assignment changed
                JobStatus js = activeServices.get(i).getRunningJobLocked(); // the job currently running in this slot
                if (js != null) {
                    if (DEBUG) {
                        Slog.d(TAG, "preempting job: "
                                + activeServices.get(i).getRunningJobLocked());
                    }
                    // preferredUid will be set to uid of currently running job.
                    activeServices.get(i).preemptExecutingJobLocked(); // preempt the running job
                    preservePreferredUid = true;
                } else {
                    final JobStatus pendingJob = contextIdToJobMap[i];
                    if (DEBUG) {
                        Slog.d(TAG, "About to run job on context "
                                + i + ", job: " + pendingJob);
                    }
                    for (int ic=0; ic<controllers.size(); ic++) {
                        controllers.get(ic).prepareForExecutionLocked(pendingJob); // let each controller prepare for execution
                    }
                    if (!activeServices.get(i).executeRunnableJob(pendingJob)) { // execute the job
                        Slog.d(TAG, "Error executing " + pendingJob);
                    }
                    if (pendingJobs.remove(pendingJob)) { // remove it from the pending queue
                        tracker.noteNonpending(pendingJob);
                    }
                }
            }
            if (!preservePreferredUid) {
                activeServices.get(i).clearPreferredUid();
            }
        }
    }
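
In short, the method runs in three passes: snapshot what each context is running, evaluate and cache the priority of every pending job, then assign pending jobs to free slots (or mark same-UID, lower-priority jobs for preemption) and finally apply the changed slots. The findJobContextIdFromMap helper used in the first two passes is a simple linear scan, paraphrased below:

    // Returns the context index on which this (uid, jobId) is already running, or -1.
    private static int findJobContextIdFromMap(JobStatus jobStatus, JobStatus[] map) {
        for (int i = 0; i < map.length; i++) {
            if (map[i] != null && map[i].matches(jobStatus.getUid(), jobStatus.getJobId())) {
                return i;
            }
        }
        return -1;
    }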