[go: nahoru, domu]

Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

new_audit: add TTI Companion Metric to JSON #8975

Merged
merged 14 commits into from
Jun 5, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions lighthouse-cli/test/cli/__snapshots__/index-test.js.snap
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,9 @@ Object {
Object {
"path": "metrics/estimated-input-latency",
},
Object {
"path": "metrics/cumulative-long-queuing-delay",
},
Object {
"path": "metrics/max-potential-fid",
},
Expand Down Expand Up @@ -710,6 +713,10 @@ Object {
"id": "estimated-input-latency",
"weight": 0,
},
Object {
"id": "cumulative-long-queuing-delay",
"weight": 0,
},
Object {
"group": "load-opportunities",
"id": "render-blocking-resources",
Expand Down
4 changes: 4 additions & 0 deletions lighthouse-core/audits/metrics.js
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ const FirstCPUIdle = require('../computed/metrics/first-cpu-idle.js');
const Interactive = require('../computed/metrics/interactive.js');
const SpeedIndex = require('../computed/metrics/speed-index.js');
const EstimatedInputLatency = require('../computed/metrics/estimated-input-latency.js');
const CumulativeLongQueuingDelay = require('../computed/metrics/cumulative-long-queuing-delay.js');

class Metrics extends Audit {
/**
Expand Down Expand Up @@ -59,6 +60,7 @@ class Metrics extends Audit {
const interactive = await requestOrUndefined(Interactive, metricComputationData);
const speedIndex = await requestOrUndefined(SpeedIndex, metricComputationData);
const estimatedInputLatency = await EstimatedInputLatency.request(metricComputationData, context); // eslint-disable-line max-len
const cumulativeLongQueuingDelay = await CumulativeLongQueuingDelay.request(metricComputationData, context); // eslint-disable-line max-len

/** @type {UberMetricsItem} */
const metrics = {
Expand All @@ -75,6 +77,7 @@ class Metrics extends Audit {
speedIndexTs: speedIndex && speedIndex.timestamp,
estimatedInputLatency: estimatedInputLatency.timing,
estimatedInputLatencyTs: estimatedInputLatency.timestamp,
cumulativeLongQueuingDelay: cumulativeLongQueuingDelay.timing,

// Include all timestamps of interest from trace of tab
observedNavigationStart: traceOfTab.timings.navigationStart,
Expand Down Expand Up @@ -137,6 +140,7 @@ class Metrics extends Audit {
* @property {number=} speedIndexTs
* @property {number} estimatedInputLatency
* @property {number=} estimatedInputLatencyTs
* @property {number} cumulativeLongQueuingDelay
* @property {number} observedNavigationStart
* @property {number} observedNavigationStartTs
* @property {number=} observedFirstPaint
Expand Down
79 changes: 79 additions & 0 deletions lighthouse-core/audits/metrics/cumulative-long-queuing-delay.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
/**
 * @license Copyright 2019 Google Inc. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
 */
'use strict';

const Audit = require('../audit.js');
const CumulativeLQD = require('../../computed/metrics/cumulative-long-queuing-delay.js');

// TODO(deepanjanroy): i18n strings once metric is final.
const UIStringsNotExported = {
  title: 'Cumulative Long Queuing Delay',
  description: '[Experimental metric] Total time period between FCP and Time to Interactive ' +
    'during which queuing time for any input event would be higher than 50ms.',
};

class CumulativeLongQueuingDelay extends Audit {
  /**
   * @return {LH.Audit.Meta}
   */
  static get meta() {
    return {
      id: 'cumulative-long-queuing-delay',
      title: UIStringsNotExported.title,
      description: UIStringsNotExported.description,
      scoreDisplayMode: Audit.SCORING_MODES.NUMERIC,
      requiredArtifacts: ['traces', 'devtoolsLogs'],
    };
  }

  /**
   * @return {LH.Audit.ScoreOptions}
   */
  static get defaultOptions() {
    return {
      // According to a cluster telemetry run over top 10k sites on mobile, 5th percentile was 0ms,
      // 25th percentile was 270ms and median was 895ms. These numbers include 404 pages. Picking
      // thresholds according to our 25/75-th rule would be quite harsh scoring (a single 350ms
      // task after FCP would yield a score of .5). The following coefficients are semi-arbitrarily
      // picked to give 600ms jank a score of .5 and 100ms jank a score of .999. We can tweak these
      // numbers in the future. See https://www.desmos.com/calculator/a7ib75kq3g
      scoreMedian: 600,
      scorePODR: 200,
    };
  }

  /**
   * Audits the page to calculate Cumulative Long Queuing Delay.
   *
   * We define Long Queuing Delay Region as any time interval in the loading timeline where queuing
   * time for an input event would be longer than 50ms. For example, if there is a 110ms main thread
   * task, the first 60ms of it is Long Queuing Delay Region, because any input event occurring in
   * that region has to wait more than 50ms. Cumulative Long Queuing Delay is the sum of all Long
   * Queuing Delay Regions between First Contentful Paint and Interactive Time (TTI).
   *
   * @param {LH.Artifacts} artifacts
   * @param {LH.Audit.Context} context
   * @return {Promise<LH.Audit.Product>}
   */
  static async audit(artifacts, context) {
    const trace = artifacts.traces[Audit.DEFAULT_PASS];
    const devtoolsLog = artifacts.devtoolsLogs[Audit.DEFAULT_PASS];
    const metricComputationData = {trace, devtoolsLog, settings: context.settings};
    const metricResult = await CumulativeLQD.request(metricComputationData, context);

    return {
      score: Audit.computeLogNormalScore(
        metricResult.timing,
        context.options.scorePODR,
        context.options.scoreMedian
      ),
      numericValue: metricResult.timing,
      // Round to the nearest 10ms for display.
      displayValue: 10 * Math.round(metricResult.timing / 10) + '\xa0ms',
    };
  }
}

module.exports = CumulativeLongQueuingDelay;
124 changes: 124 additions & 0 deletions lighthouse-core/computed/metrics/cumulative-long-queuing-delay.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,124 @@
/**
 * @license Copyright 2019 Google Inc. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
 */
'use strict';

const makeComputedArtifact = require('../computed-artifact.js');
const ComputedMetric = require('./metric.js');
const LHError = require('../../lib/lh-error.js');
const TracingProcessor = require('../../lib/traces/tracing-processor.js');
const LanternCumulativeLongQueuingDelay = require('./lantern-cumulative-long-queuing-delay.js');
const TimeToInteractive = require('./interactive.js');

/**
 * @fileoverview This audit determines Cumulative Long Queuing Delay between FCP and TTI.
 *
 * We define Long Queuing Delay Region as any time interval in the loading timeline where queuing
 * time for an input event would be longer than 50ms. For example, if there is a 110ms main thread
 * task, the first 60ms of it is Long Queuing Delay Region, because any input event occurring in
 * that region has to wait more than 50ms. Cumulative Long Queuing Delay is the sum of all Long
 * Queuing Delay Regions between First Contentful Paint and Interactive Time (TTI).
 *
 * This is a new metric designed to accompany Time to Interactive. TTI is strict and does not
 * reflect incremental improvements to the site performance unless the improvement concerns the last
 * long task. Cumulative Long Queuing Delay on the other hand is designed to be much more responsive
 * to smaller improvements to main thread responsiveness.
 */
class CumulativeLongQueuingDelay extends ComputedMetric {
  /**
   * Queuing delays at or above this threshold (in ms) are considered "long".
   * @return {number}
   */
  static get LONG_QUEUING_DELAY_THRESHOLD() {
    return 50;
  }

  /**
   * @param {Array<{start: number, end: number, duration: number}>} topLevelEvents
   * @param {number} fcpTimeInMs
   * @param {number} interactiveTimeMs
   * @return {number}
   */
  static calculateSumOfLongQueuingDelay(topLevelEvents, fcpTimeInMs, interactiveTimeMs) {
    // An empty or inverted FCP..TTI window contains no Long Queuing Delay Regions.
    if (interactiveTimeMs <= fcpTimeInMs) return 0;

    const threshold = CumulativeLongQueuingDelay.LONG_QUEUING_DELAY_THRESHOLD;
    const longQueuingDelayRegions = [];
    // First identifying the long queuing delay regions.
    for (const event of topLevelEvents) {
      // If the task is less than the delay threshold, it contains no Long Queuing Delay Region.
      if (event.duration < threshold) continue;
      // Otherwise, the duration of the task before the delay-threshold-sized interval at the end is
      // considered Long Queuing Delay Region. Example assuming the threshold is 50ms:
      //   [              250ms Task                   ]
      //   |  Long Queuing Delay Region   | Last 50ms  |
      //                 200 ms
      longQueuingDelayRegions.push({
        start: event.start,
        end: event.end - threshold,
        duration: event.duration - threshold,
      });
    }

    let sumLongQueuingDelay = 0;
    for (const region of longQueuingDelayRegions) {
      // We only want to add up the Long Queuing Delay regions that fall between FCP and TTI.
      //
      // FCP is picked as the lower bound because there is little risk of user input happening
      // before FCP so Long Queuing Delay regions do not harm user experience. Developers should be
      // optimizing to reach FCP as fast as possible without having to worry about task lengths.
      //
      // TTI is picked as the upper bound because we want a well defined end point so that the
      // metric does not rely on how long we trace.
      if (region.end < fcpTimeInMs) continue;
      if (region.start > interactiveTimeMs) continue;

      // If a Long Queuing Delay Region spans the edges of our region of interest, we clip it to
      // only include the part of the region that falls inside.
      const clippedStart = Math.max(region.start, fcpTimeInMs);
      const clippedEnd = Math.min(region.end, interactiveTimeMs);
      const queuingDelayAfterClipping = clippedEnd - clippedStart;

      sumLongQueuingDelay += queuingDelayAfterClipping;
    }

    return sumLongQueuingDelay;
  }

  /**
   * @param {LH.Artifacts.MetricComputationData} data
   * @param {LH.Audit.Context} context
   * @return {Promise<LH.Artifacts.LanternMetric>}
   */
  static computeSimulatedMetric(data, context) {
    return LanternCumulativeLongQueuingDelay.request(data, context);
  }

  /**
   * @param {LH.Artifacts.MetricComputationData} data
   * @param {LH.Audit.Context} context
   * @return {Promise<LH.Artifacts.Metric>}
   */
  static async computeObservedMetric(data, context) {
    const {firstContentfulPaint} = data.traceOfTab.timings;
    if (!firstContentfulPaint) {
      throw new LHError(LHError.errors.NO_FCP);
    }

    const interactiveTimeMs = (await TimeToInteractive.request(data, context)).timing;

    // Not using the start time argument of getMainThreadTopLevelEvents, because
    // we need to clip the part of the task before the last 50ms properly.
    const events = TracingProcessor.getMainThreadTopLevelEvents(data.traceOfTab);

    return {
      timing: CumulativeLongQueuingDelay.calculateSumOfLongQueuingDelay(
        events,
        firstContentfulPaint,
        interactiveTimeMs
      ),
    };
  }
}

module.exports = makeComputedArtifact(CumulativeLongQueuingDelay);
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
/**
 * @license Copyright 2019 Google Inc. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
 */
'use strict';

const makeComputedArtifact = require('../computed-artifact.js');
const LanternMetric = require('./lantern-metric.js');
const BaseNode = require('../../lib/dependency-graph/base-node.js');
const LanternFirstContentfulPaint = require('./lantern-first-contentful-paint.js');
const LanternInteractive = require('./lantern-interactive.js');

/** @typedef {BaseNode.Node} Node */

class LanternCumulativeLongQueuingDelay extends LanternMetric {
  /**
   * @return {LH.Gatherer.Simulation.MetricCoefficients}
   */
  static get COEFFICIENTS() {
    return {
      intercept: 0,
      optimistic: 0.5,
      pessimistic: 0.5,
    };
  }

  /**
   * @param {Node} dependencyGraph
   * @return {Node}
   */
  static getOptimisticGraph(dependencyGraph) {
    return dependencyGraph;
  }

  /**
   * @param {Node} dependencyGraph
   * @return {Node}
   */
  static getPessimisticGraph(dependencyGraph) {
    return dependencyGraph;
  }

  /**
   * @param {LH.Gatherer.Simulation.Result} simulation
   * @param {Object} extras
   * @return {LH.Gatherer.Simulation.Result}
   */
  static getEstimateFromSimulation(simulation, extras) {
    // Intentionally use the opposite FCP estimate. A pessimistic FCP is higher than equal to an
    // optimistic FCP, which means potentially more tasks are excluded from the
    // CumulativeLongQueuingDelay computation. So a more pessimistic FCP gives a more optimistic
    // CumulativeLongQueuingDelay for the same work.
    const fcpTimeInMs = extras.optimistic
      ? extras.fcpResult.pessimisticEstimate.timeInMs
      : extras.fcpResult.optimisticEstimate.timeInMs;

    // Similarly, we always have pessimistic TTI >= optimistic TTI. Therefore, picking optimistic
    // TTI means our window of interest is smaller and thus potentially more tasks are excluded from
    // CumulativeLongQueuingDelay computation, yielding a lower (more optimistic)
    // CumulativeLongQueuingDelay value for the same work.
    const interactiveTimeMs = extras.optimistic
      ? extras.interactiveResult.optimisticEstimate.timeInMs
      : extras.interactiveResult.pessimisticEstimate.timeInMs;

    // Require here to resolve circular dependency.
    const CumulativeLongQueuingDelay = require('./cumulative-long-queuing-delay.js');
    const minDurationMs = CumulativeLongQueuingDelay.LONG_QUEUING_DELAY_THRESHOLD;

    const events = LanternCumulativeLongQueuingDelay.getTopLevelEvents(
      simulation.nodeTimings,
      minDurationMs
    );

    return {
      timeInMs: CumulativeLongQueuingDelay.calculateSumOfLongQueuingDelay(
        events,
        fcpTimeInMs,
        interactiveTimeMs
      ),
      nodeTimings: simulation.nodeTimings,
    };
  }

  /**
   * @param {LH.Artifacts.MetricComputationDataInput} data
   * @param {LH.Audit.Context} context
   * @return {Promise<LH.Artifacts.LanternMetric>}
   */
  static async compute_(data, context) {
    const fcpResult = await LanternFirstContentfulPaint.request(data, context);
    const interactiveResult = await LanternInteractive.request(data, context);
    return this.computeMetricWithGraphs(data, context, {fcpResult, interactiveResult});
  }

  /**
   * Collects simulated CPU node timings as top-level-event-like objects,
   * dropping any shorter than minDurationMs.
   * @param {LH.Gatherer.Simulation.Result['nodeTimings']} nodeTimings
   * @param {number} minDurationMs
   * @return {Array<{start: number, end: number, duration: number}>}
   */
  static getTopLevelEvents(nodeTimings, minDurationMs) {
    /** @type {Array<{start: number, end: number, duration: number}>} */
    const events = [];

    for (const [node, timing] of nodeTimings.entries()) {
      if (node.type !== BaseNode.TYPES.CPU) continue;
      // Filtering out events below minimum duration.
      if (timing.duration < minDurationMs) continue;

      events.push({
        start: timing.startTime,
        end: timing.endTime,
        duration: timing.duration,
      });
    }

    return events;
  }
}

module.exports = makeComputedArtifact(LanternCumulativeLongQueuingDelay);
3 changes: 2 additions & 1 deletion lighthouse-core/config/default-config.js
Original file line number Diff line number Diff line change
Expand Up @@ -170,6 +170,7 @@ const defaultConfig = {
'screenshot-thumbnails',
'final-screenshot',
'metrics/estimated-input-latency',
'metrics/cumulative-long-queuing-delay',
'metrics/max-potential-fid',
'errors-in-console',
'time-to-first-byte',
Expand Down Expand Up @@ -366,7 +367,7 @@ const defaultConfig = {
{id: 'first-cpu-idle', weight: 2, group: 'metrics'},
{id: 'max-potential-fid', weight: 0, group: 'metrics'},
{id: 'estimated-input-latency', weight: 0}, // intentionally left out of metrics so it won't be displayed

{id: 'cumulative-long-queuing-delay', weight: 0}, // intentionally left out of metrics so it won't be displayed
{id: 'render-blocking-resources', weight: 0, group: 'load-opportunities'},
{id: 'uses-responsive-images', weight: 0, group: 'load-opportunities'},
{id: 'offscreen-images', weight: 0, group: 'load-opportunities'},
Expand Down
Loading