Object.defineProperty(exports, "__esModule", {
value: true
});
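// The no-op assignment below appears to be an annotation emitted by the
// compiler so that Node.js's cjs-module-lexer can statically detect the named
// exports of this CommonJS module when it's imported from ESM.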
0 && (module.exports = {
cancelPrefetchTask: null,
pingPrefetchTask: null,
reschedulePrefetchTask: null,
schedulePrefetchTask: null
});
function _export(target, all) {
for(var name in all)Object.defineProperty(target, name, {
enumerable: true,
get: all[name]
});
}
_export(exports, {
cancelPrefetchTask: function() {
return cancelPrefetchTask;
},
pingPrefetchTask: function() {
return pingPrefetchTask;
},
reschedulePrefetchTask: function() {
return reschedulePrefetchTask;
},
schedulePrefetchTask: function() {
return schedulePrefetchTask;
}
});
const _matchsegments = require("../match-segments");
const _cache = require("./cache");
const _segmentcache = require("../segment-cache");
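// Prefer the native queueMicrotask. The fallback below schedules via a
// resolved promise and rethrows any error inside setTimeout, so it surfaces
// as an uncaught exception rather than an unhandled rejection.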
const scheduleMicrotask = typeof queueMicrotask === 'function' ? queueMicrotask : (fn)=>Promise.resolve().then(fn).catch((error)=>setTimeout(()=>{
throw error;
}));
const taskHeap = [];
// This is intentionally low so that when a navigation happens, the browser's
// internal network queue is not already saturated with prefetch requests.
const MAX_CONCURRENT_PREFETCH_REQUESTS = 3;
let inProgressRequests = 0;
let sortIdCounter = 0;
let didScheduleMicrotask = false;
function schedulePrefetchTask(key, treeAtTimeOfPrefetch, includeDynamicData, priority) {
// Spawn a new prefetch task
const task = {
key,
treeAtTimeOfPrefetch,
priority,
// Tasks start in the route tree phase (1) and switch to the segment
// phase (0) once the route tree has been fetched.
phase: 1,
hasBackgroundWork: false,
includeDynamicData,
// Newer tasks are processed before older ones at the same priority.
sortId: sortIdCounter++,
isCanceled: false,
// Index of the task within the heap array; -1 when not in the heap.
_heapIndex: -1
};
heapPush(taskHeap, task);
// Schedule an async task to process the queue.
//
// The main reason we process the queue in an async task is for batching.
// It's common for a single JS task/event to trigger multiple prefetches.
// By deferring to a microtask, we only process the queue once per JS task.
// If they have different priorities, it also ensures they are processed in
// the optimal order.
ensureWorkIsScheduled();
return task;
}
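// Illustrative usage (the cache key, router tree, and a `Default` priority
// level are assumptions for the example, not defined in this module):
//
//   const task = schedulePrefetchTask(
//     routeCacheKey, // key for the target href
//     currentFlightRouterState, // tree rendered when the prefetch was requested
//     false, // static data only
//     _segmentcache.PrefetchPriority.Default
//   );
//
// Multiple calls within the same JS task are batched: the queue is processed
// once, in priority order, in a single microtask.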
function cancelPrefetchTask(task) {
// Remove the prefetch task from the queue. If the task already completed,
// then this is a no-op.
//
// We must also explicitly mark the task as canceled so that a blocked task
// does not get added back to the queue when it's pinged by the network.
task.isCanceled = true;
heapDelete(taskHeap, task);
}
function reschedulePrefetchTask(task, treeAtTimeOfPrefetch, includeDynamicData, priority) {
// Bump the prefetch task to the top of the queue, as if it were a fresh
// task. This is essentially the same as canceling the task and scheduling
// a new one, except it reuses the original object.
//
// The primary use case is to increase the priority of a Link-initiated
// prefetch on hover.
// Un-cancel the task, in case it was previously canceled.
task.isCanceled = false;
task.phase = 1;
// Assign a new sort ID to move it ahead of all other tasks at the same
// priority level. (Higher sort IDs are processed first.)
task.sortId = sortIdCounter++;
task.priority = priority;
task.treeAtTimeOfPrefetch = treeAtTimeOfPrefetch;
task.includeDynamicData = includeDynamicData;
if (task._heapIndex !== -1) {
// The task is already in the queue.
heapResift(taskHeap, task);
} else {
heapPush(taskHeap, task);
}
ensureWorkIsScheduled();
}
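// Illustrative usage: upgrading a viewport-initiated Link prefetch when the
// user hovers, assuming an Intent-level priority exists in PrefetchPriority:
//
//   reschedulePrefetchTask(
//     task,
//     currentFlightRouterState,
//     false,
//     _segmentcache.PrefetchPriority.Intent
//   );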
function ensureWorkIsScheduled() {
if (didScheduleMicrotask || !hasNetworkBandwidth()) {
// Either we already scheduled a task to process the queue, or there are
// too many concurrent requests in progress. In the latter case, the
// queue will resume processing once more bandwidth is available.
return;
}
didScheduleMicrotask = true;
scheduleMicrotask(processQueueInMicrotask);
}
/**
* Checks if we've exceeded the maximum number of concurrent prefetch requests,
* to avoid saturating the browser's internal network queue. This is a
* cooperative limit — prefetch tasks should check this before issuing
* new requests.
*/ function hasNetworkBandwidth() {
// TODO: Also check if there's an in-progress navigation. We should never
// add prefetch requests to the network queue if an actual navigation is
// taking place, to ensure there's sufficient bandwidth for render-blocking
// data and resources.
return inProgressRequests < MAX_CONCURRENT_PREFETCH_REQUESTS;
}
function spawnPrefetchSubtask(prefetchSubtask) {
// When the scheduler spawns an async task, we don't await its result.
// Instead, the async task writes its result directly into the cache, then
// pings the scheduler to continue.
//
// We process server responses streamingly, so the prefetch subtask will
// likely resolve before we're finished receiving all the data. The subtask
// result includes a promise that resolves once the network connection is
// closed. The scheduler uses this to control network bandwidth by tracking
// and limiting the number of concurrent requests.
inProgressRequests++;
return prefetchSubtask.then((result)=>{
if (result === null) {
// The prefetch task errored before it could start processing the
// network stream. Assume the connection is closed.
onPrefetchConnectionClosed();
return null;
}
// Wait for the connection to close before freeing up more bandwidth.
result.closed.then(onPrefetchConnectionClosed);
return result.value;
});
}
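// Inferred from the usage above, a prefetch subtask resolves to either `null`
// (it failed before the stream started) or an object shaped roughly as:
//
//   { value: SomeResult, closed: Promise<void> }
//
// where `closed` settles once the underlying network connection is done.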
function onPrefetchConnectionClosed() {
inProgressRequests--;
// Notify the scheduler that we have more bandwidth, and can continue
// processing tasks.
ensureWorkIsScheduled();
}
function pingPrefetchTask(task) {
// "Ping" a prefetch that's already in progress to notify it of new data.
if (
// Check if the prefetch was canceled.
task.isCanceled ||
// Check if the prefetch is already queued.
task._heapIndex !== -1) {
return;
}
// Add the task back to the queue.
heapPush(taskHeap, task);
ensureWorkIsScheduled();
}
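// Exit statuses returned by the ping* functions below. The numeric values are
// inferred from how they're handled in this switch:
//   0 - Yielded: no network bandwidth available; stop processing the queue.
//   1 - Blocked: waiting for data; the task is pinged when the server responds.
//   2 - Done: this attempt finished (background work may still be scheduled).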
function processQueueInMicrotask() {
didScheduleMicrotask = false;
// We aim to minimize how often we read the current time. Since nearly all
// functions in the prefetch scheduler are synchronous, we can read the time
// once and pass it as an argument wherever it's needed.
const now = Date.now();
// Process the task queue until we run out of network bandwidth.
let task = heapPeek(taskHeap);
while(task !== null && hasNetworkBandwidth()){
const route = (0, _cache.readOrCreateRouteCacheEntry)(now, task);
const exitStatus = pingRootRouteTree(now, task, route);
// The `hasBackgroundWork` field is only valid for a single attempt. Reset
// it immediately upon exit.
const hasBackgroundWork = task.hasBackgroundWork;
task.hasBackgroundWork = false;
switch(exitStatus){
case 0:
// The task yielded because there are too many requests in progress.
// Stop processing tasks until we have more bandwidth.
return;
case 1:
// The task is blocked. It needs more data before it can proceed.
// Keep the task out of the queue until the server responds.
heapPop(taskHeap);
// Continue to the next task
task = heapPeek(taskHeap);
continue;
case 2:
if (task.phase === 1) {
// Finished prefetching the route tree. Proceed to prefetching
// the segments.
task.phase = 0;
heapResift(taskHeap, task);
} else if (hasBackgroundWork) {
// The task spawned additional background work. Reschedule the task
// at background priority.
task.priority = _segmentcache.PrefetchPriority.Background;
heapResift(taskHeap, task);
} else {
// The prefetch is complete. Continue to the next task.
heapPop(taskHeap);
}
task = heapPeek(taskHeap);
continue;
default:
// Exhaustiveness check: in the TypeScript source, `exitStatus` is
// presumably narrowed to `never` here. The same no-op pattern appears
// in the other switch statements in this module.
exitStatus;
}
}
}
/**
 * Check this during a prefetch task to determine whether background work can
 * be performed now. If so, it returns `true`. Otherwise, it returns `false`
 * and marks the task so the scheduler reschedules it at background priority.
 * Usage:
 *
 * @example
 * if (background(task)) {
 *   // Perform background-priority work
 * }
*/ function background(task) {
if (task.priority === _segmentcache.PrefetchPriority.Background) {
return true;
}
task.hasBackgroundWork = true;
return false;
}
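// When `background` returns `false`, the `hasBackgroundWork` flag it sets is
// picked up by processQueueInMicrotask above, which resifts the task into the
// queue at PrefetchPriority.Background once the current attempt finishes.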
function pingRootRouteTree(now, task, route) {
switch(route.status){
case _cache.EntryStatus.Empty:
{
// Route is not yet cached, and there's no request already in progress.
// Spawn a task to request the route, load it into the cache, and ping
// the task to continue.
// TODO: There are multiple strategies in the <Link> API for prefetching
// a route. Currently we've only implemented the main one: per-segment,
// static-data only.
//
// There's also <Link prefetch={true}> which prefetches both static *and*
// dynamic data. Similarly, we need to fall back to the old, per-page
// behavior if PPR is disabled for a route (via the incremental opt-in).
//
// Those cases will be handled here.
spawnPrefetchSubtask((0, _cache.fetchRouteOnCacheMiss)(route, task));
// If the request takes longer than a minute, a subsequent request should
// retry instead of waiting for this one. When the response is received,
// this value will be replaced by a new value based on the stale time sent
// from the server.
// TODO: We should probably also manually abort the fetch task, to reclaim
// server bandwidth.
route.staleAt = now + 60 * 1000;
// Upgrade to Pending so we know there's already a request in progress
route.status = _cache.EntryStatus.Pending;
// Intentional fallthrough to the Pending branch
}
case _cache.EntryStatus.Pending:
{
// Still pending. We can't start prefetching the segments until the route
// tree has loaded. Add the task to the set of blocked tasks so that it
// is notified when the route tree is ready.
const blockedTasks = route.blockedTasks;
if (blockedTasks === null) {
route.blockedTasks = new Set([
task
]);
} else {
blockedTasks.add(task);
}
return 1;
}
case _cache.EntryStatus.Rejected:
{
// Route tree failed to load. Treat as a 404.
return 2;
}
case _cache.EntryStatus.Fulfilled:
{
if (task.phase !== 0) {
// Do not prefetch segment data until we've entered the segment phase.
return 2;
}
// Recursively fill in the segment tree.
if (!hasNetworkBandwidth()) {
// Stop prefetching segments until there's more bandwidth.
return 0;
}
const tree = route.tree;
// Determine which fetch strategy to use for this prefetch task.
const fetchStrategy = task.includeDynamicData ? _cache.FetchStrategy.Full : route.isPPREnabled ? _cache.FetchStrategy.PPR : _cache.FetchStrategy.LoadingBoundary;
switch(fetchStrategy){
case _cache.FetchStrategy.PPR:
// Individually prefetch the static shell for each segment. This is
// the default prefetching behavior for static routes, or when PPR is
// enabled. It will not include any dynamic data.
return pingPPRRouteTree(now, task, route, tree);
case _cache.FetchStrategy.Full:
case _cache.FetchStrategy.LoadingBoundary:
{
// Prefetch multiple segments using a single dynamic request.
const spawnedEntries = new Map();
const dynamicRequestTree = diffRouteTreeAgainstCurrent(now, task, route, task.treeAtTimeOfPrefetch, tree, spawnedEntries, fetchStrategy);
const needsDynamicRequest = spawnedEntries.size > 0;
if (needsDynamicRequest) {
// Perform a dynamic prefetch request and populate the cache with
// the result
spawnPrefetchSubtask((0, _cache.fetchSegmentPrefetchesUsingDynamicRequest)(task, route, fetchStrategy, dynamicRequestTree, spawnedEntries));
}
return 2;
}
default:
fetchStrategy;
}
break;
}
default:
{
route;
}
}
return 2;
}
function pingPPRRouteTree(now, task, route, tree) {
const segment = (0, _cache.readOrCreateSegmentCacheEntry)(now, task, route, tree.key);
pingPerSegment(now, task, route, segment, task.key, tree.key);
if (tree.slots !== null) {
if (!hasNetworkBandwidth()) {
// Stop prefetching segments until there's more bandwidth.
return 0;
}
// Recursively ping the children.
for(const parallelRouteKey in tree.slots){
const childTree = tree.slots[parallelRouteKey];
const childExitStatus = pingPPRRouteTree(now, task, route, childTree);
if (childExitStatus === 0) {
// Child yielded without finishing.
return 0;
}
}
}
// This segment and all its children have finished prefetching.
return 2;
}
function diffRouteTreeAgainstCurrent(now, task, route, oldTree, newTree, spawnedEntries, fetchStrategy) {
// This is a single recursive traversal that does multiple things:
// - Finds the parts of the target route (newTree) that are not part of
// the current page (oldTree) by diffing them, using the same algorithm
// as a real navigation.
// - Constructs a request tree (FlightRouterState) that describes which
// segments need to be prefetched and which ones are already cached.
// - Creates a set of pending cache entries for the segments that need to
// be prefetched, so that a subsequent prefetch task does not request the
// same segments again.
const oldTreeChildren = oldTree[1];
const newTreeChildren = newTree.slots;
let requestTreeChildren = {};
if (newTreeChildren !== null) {
for(const parallelRouteKey in newTreeChildren){
const newTreeChild = newTreeChildren[parallelRouteKey];
const newTreeChildSegment = newTreeChild.segment;
const oldTreeChild = oldTreeChildren[parallelRouteKey];
const oldTreeChildSegment = oldTreeChild == null ? void 0 : oldTreeChild[0];
if (oldTreeChildSegment !== undefined && (0, _matchsegments.matchSegment)(newTreeChildSegment, oldTreeChildSegment)) {
// This segment is already part of the current route. Keep traversing.
const requestTreeChild = diffRouteTreeAgainstCurrent(now, task, route, oldTreeChild, newTreeChild, spawnedEntries, fetchStrategy);
requestTreeChildren[parallelRouteKey] = requestTreeChild;
} else {
// This segment is not part of the current route. We're entering a
// part of the tree that we need to prefetch (unless everything is
// already cached).
switch(fetchStrategy){
case _cache.FetchStrategy.LoadingBoundary:
{
// When PPR is disabled, we can't prefetch per segment. We must
// fall back to the old prefetch behavior and send a dynamic request.
// Only routes that include a loading boundary can be prefetched in
// this way.
//
// This is similar to a "full" prefetch, but we're much more
// conservative about which segments to include in the request.
//
// The server will only render up to the first loading boundary
// inside the new part of the tree. If there's no loading boundary, the
// server will never return any data. TODO: When we prefetch the
// route tree, the server should indicate whether there's a loading
// boundary so the client doesn't send a second request for no
// reason.
const requestTreeChild = pingPPRDisabledRouteTreeUpToLoadingBoundary(now, task, route, newTreeChild, null, spawnedEntries);
requestTreeChildren[parallelRouteKey] = requestTreeChild;
break;
}
case _cache.FetchStrategy.Full:
{
// This is a "full" prefetch. Fetch all the data in the tree, both
// static and dynamic. We issue roughly the same request that we
// would during a real navigation. The goal is that once the
// navigation occurs, the router should not have to fetch any
// additional data.
//
// Although the response will include dynamic data, opting into a
// Full prefetch — via <Link prefetch={true}> — implicitly
// instructs the cache to treat the response as "static", or non-
// dynamic, since the whole point is to cache it for
// future navigations.
//
// Construct a tree (currently a FlightRouterState) that represents
// which segments need to be prefetched and which ones are already
// cached. If the tree is empty, then we can exit. Otherwise, we'll
// send the request tree to the server and use the response to
// populate the segment cache.
const requestTreeChild = pingRouteTreeAndIncludeDynamicData(now, task, route, newTreeChild, false, spawnedEntries);
requestTreeChildren[parallelRouteKey] = requestTreeChild;
break;
}
default:
fetchStrategy;
}
}
}
}
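// The request tree is a FlightRouterState-shaped tuple. Judging by how it's
// constructed in this module, the slots are:
//   [segment, children, url (always null here), refetch marker, isRootLayout]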
const requestTree = [
newTree.segment,
requestTreeChildren,
null,
null,
newTree.isRootLayout
];
return requestTree;
}
function pingPPRDisabledRouteTreeUpToLoadingBoundary(now, task, route, tree, refetchMarkerContext, spawnedEntries) {
// This function is similar to pingRouteTreeAndIncludeDynamicData, except the
// server is only going to return a minimal loading state — it will stop
// rendering at the first loading boundary. Whereas a Full prefetch is
// intentionally aggressive and tries to prefetch all the data that will be
// needed for a navigation, a LoadingBoundary prefetch is much more
// conservative. For example, it will omit from the request tree any segment
// that is already cached, regardless of whether it's partial or full. By
// contrast, a Full prefetch will refetch partial segments.
// "inside-shared-layout" tells the server where to start looking for a
// loading boundary.
let refetchMarker = refetchMarkerContext === null ? 'inside-shared-layout' : null;
const segment = (0, _cache.readOrCreateSegmentCacheEntry)(now, task, route, tree.key);
switch(segment.status){
case _cache.EntryStatus.Empty:
{
// This segment is not cached. Add a refetch marker so the server knows
// to start rendering here.
// TODO: Instead of a "refetch" marker, we could just omit this subtree's
// FlightRouterState from the request tree. I think this would probably
// already work even without any updates to the server. For consistency,
// though, I'll send the full tree and we'll look into this later as part
// of a larger redesign of the request protocol.
// Add the pending cache entry to the result map. Set the fetch strategy to
// LoadingBoundary to indicate that the server might not include this
// segment in the response. If another route is able to issue a per-segment
// request, we'll do that in the background.
spawnedEntries.set(tree.key, (0, _cache.upgradeToPendingSegment)(segment, _cache.FetchStrategy.LoadingBoundary));
if (refetchMarkerContext !== 'refetch') {
refetchMarker = refetchMarkerContext = 'refetch';
} else {
// There's already a parent with a refetch marker, so we don't need
// to add another one.
}
break;
}
case _cache.EntryStatus.Fulfilled:
{
// The segment is already cached.
// TODO: The server should include a `hasLoading` field as part of the
// route tree prefetch.
if (segment.loading !== null) {
// This segment has a loading boundary, which means the server won't
// render its children. So there's nothing left to prefetch along this
// path. We can bail out.
return (0, _cache.convertRouteTreeToFlightRouterState)(tree);
}
break;
}
case _cache.EntryStatus.Pending:
{
// A request for this segment is already in progress. Don't mark it
// for refetch; just keep traversing the children.
break;
}
case _cache.EntryStatus.Rejected:
{
// The previous request for this segment failed. Don't refetch it as
// part of this conservative prefetch; keep traversing the children.
break;
}
default:
segment;
}
const requestTreeChildren = {};
if (tree.slots !== null) {
for(const parallelRouteKey in tree.slots){
const childTree = tree.slots[parallelRouteKey];
requestTreeChildren[parallelRouteKey] = pingPPRDisabledRouteTreeUpToLoadingBoundary(now, task, route, childTree, refetchMarkerContext, spawnedEntries);
}
}
const requestTree = [
tree.segment,
requestTreeChildren,
null,
refetchMarker,
tree.isRootLayout
];
return requestTree;
}
function pingRouteTreeAndIncludeDynamicData(now, task, route, tree, isInsideRefetchingParent, spawnedEntries) {
// The tree we're constructing is the same shape as the tree we're navigating
// to. But even though this is a "new" tree, some of the individual segments
// may be cached as a result of other route prefetches.
//
// So we need to find the first uncached segment along each path and add an
// explicit "refetch" marker so the server knows where to start rendering.
// Once the server starts rendering along a path, it keeps rendering the
// entire subtree.
const segment = (0, _cache.readOrCreateSegmentCacheEntry)(now, task, route, tree.key);
let spawnedSegment = null;
switch(segment.status){
case _cache.EntryStatus.Empty:
{
// This segment is not cached. Include it in the request.
spawnedSegment = (0, _cache.upgradeToPendingSegment)(segment, _cache.FetchStrategy.Full);
break;
}
case _cache.EntryStatus.Fulfilled:
{
// The segment is already cached.
if (segment.isPartial) {
// The cached segment contains dynamic holes. Since this is a Full
// prefetch, we need to include it in the request.
spawnedSegment = pingFullSegmentRevalidation(now, task, route, segment, tree.key);
}
break;
}
case _cache.EntryStatus.Pending:
case _cache.EntryStatus.Rejected:
{
// There's either another prefetch currently in progress, or the previous
// attempt failed. If it wasn't a Full prefetch, fetch it again.
if (segment.fetchStrategy !== _cache.FetchStrategy.Full) {
spawnedSegment = pingFullSegmentRevalidation(now, task, route, segment, tree.key);
}
break;
}
default:
segment;
}
const requestTreeChildren = {};
if (tree.slots !== null) {
for(const parallelRouteKey in tree.slots){
const childTree = tree.slots[parallelRouteKey];
requestTreeChildren[parallelRouteKey] = pingRouteTreeAndIncludeDynamicData(now, task, route, childTree, isInsideRefetchingParent || spawnedSegment !== null, spawnedEntries);
}
}
if (spawnedSegment !== null) {
// Add the pending entry to the result map.
spawnedEntries.set(tree.key, spawnedSegment);
}
// Don't bother to add a refetch marker if one is already present in a parent.
const refetchMarker = !isInsideRefetchingParent && spawnedSegment !== null ? 'refetch' : null;
const requestTree = [
tree.segment,
requestTreeChildren,
null,
refetchMarker,
tree.isRootLayout
];
return requestTree;
}
function pingPerSegment(now, task, route, segment, routeKey, segmentKey) {
switch(segment.status){
case _cache.EntryStatus.Empty:
// Upgrade to Pending so we know there's already a request in progress
spawnPrefetchSubtask((0, _cache.fetchSegmentOnCacheMiss)(route, (0, _cache.upgradeToPendingSegment)(segment, _cache.FetchStrategy.PPR), routeKey, segmentKey));
break;
case _cache.EntryStatus.Pending:
{
// There's already a request in progress. Depending on what kind of
// request it is, we may want to revalidate it.
switch(segment.fetchStrategy){
case _cache.FetchStrategy.PPR:
case _cache.FetchStrategy.Full:
break;
case _cache.FetchStrategy.LoadingBoundary:
// There's a pending request, but because it's using the old
// prefetching strategy, we can't be sure if it will be fulfilled by
// the response — it might be inside the loading boundary. Perform
// a revalidation, but because it's speculative, wait to do it at
// background priority.
if (background(task)) {
// TODO: Instead of speculatively revalidating, consider including
// `hasLoading` in the route tree prefetch response.
pingPPRSegmentRevalidation(now, task, segment, route, routeKey, segmentKey);
}
break;
default:
segment.fetchStrategy;
}
break;
}
case _cache.EntryStatus.Rejected:
{
// The existing entry in the cache was rejected. Depending on how it
// was originally fetched, we may or may not want to revalidate it.
switch(segment.fetchStrategy){
case _cache.FetchStrategy.PPR:
case _cache.FetchStrategy.Full:
break;
case _cache.FetchStrategy.LoadingBoundary:
// There's a rejected entry, but it was fetched using the loading
// boundary strategy. So the reason it wasn't returned by the server
// might just be because it was inside a loading boundary. Or because
// there was a dynamic rewrite. Revalidate it using the per-
// segment strategy.
//
// Because a rejected segment will definitely prevent the segment (and
// all of its children) from rendering, we perform this revalidation
// immediately instead of deferring it to a background task.
pingPPRSegmentRevalidation(now, task, segment, route, routeKey, segmentKey);
break;
default:
segment.fetchStrategy;
}
break;
}
case _cache.EntryStatus.Fulfilled:
break;
default:
segment;
}
// Segments do not have dependent tasks, so once the prefetch is initiated,
// there's nothing else for us to do (except write the server data into the
// entry, which is handled by `fetchSegmentOnCacheMiss`).
}
function pingPPRSegmentRevalidation(now, task, currentSegment, route, routeKey, segmentKey) {
const revalidatingSegment = (0, _cache.readOrCreateRevalidatingSegmentEntry)(now, currentSegment);
switch(revalidatingSegment.status){
case _cache.EntryStatus.Empty:
// Spawn a prefetch request and upsert the segment into the cache
// upon completion.
upsertSegmentOnCompletion(task, route, segmentKey, spawnPrefetchSubtask((0, _cache.fetchSegmentOnCacheMiss)(route, (0, _cache.upgradeToPendingSegment)(revalidatingSegment, _cache.FetchStrategy.PPR), routeKey, segmentKey)));
break;
case _cache.EntryStatus.Pending:
break;
case _cache.EntryStatus.Fulfilled:
case _cache.EntryStatus.Rejected:
break;
default:
revalidatingSegment;
}
}
function pingFullSegmentRevalidation(now, task, route, currentSegment, segmentKey) {
const revalidatingSegment = (0, _cache.readOrCreateRevalidatingSegmentEntry)(now, currentSegment);
if (revalidatingSegment.status === _cache.EntryStatus.Empty) {
// During a Full prefetch, a single dynamic request is made for all the
// segments that we need. So we don't initiate a request here directly. By
// returning a pending entry from this function, it signals to the caller
// that this segment should be included in the request that's sent to
// the server.
const pendingSegment = (0, _cache.upgradeToPendingSegment)(revalidatingSegment, _cache.FetchStrategy.Full);
upsertSegmentOnCompletion(task, route, segmentKey, (0, _cache.waitForSegmentCacheEntry)(pendingSegment));
return pendingSegment;
} else {
// There's already a revalidation in progress.
const nonEmptyRevalidatingSegment = revalidatingSegment;
if (nonEmptyRevalidatingSegment.fetchStrategy !== _cache.FetchStrategy.Full) {
// The existing revalidation was not fetched using the Full strategy.
// Reset it and start a new revalidation.
const emptySegment = (0, _cache.resetRevalidatingSegmentEntry)(nonEmptyRevalidatingSegment);
const pendingSegment = (0, _cache.upgradeToPendingSegment)(emptySegment, _cache.FetchStrategy.Full);
upsertSegmentOnCompletion(task, route, segmentKey, (0, _cache.waitForSegmentCacheEntry)(pendingSegment));
return pendingSegment;
}
switch(nonEmptyRevalidatingSegment.status){
case _cache.EntryStatus.Pending:
// There's already an in-progress prefetch that includes this segment.
return null;
case _cache.EntryStatus.Fulfilled:
case _cache.EntryStatus.Rejected:
// A previous revalidation attempt finished, but we chose not to replace
// the existing entry in the cache. Don't try again until or unless the
// revalidation entry expires.
return null;
default:
nonEmptyRevalidatingSegment;
return null;
}
}
}
const noop = ()=>{};
function upsertSegmentOnCompletion(task, route, key, promise) {
// Wait for a segment to finish loading, then upsert it into the cache
promise.then((fulfilled)=>{
if (fulfilled !== null) {
// Received new data. Attempt to replace the existing entry in the cache.
const keypath = (0, _cache.getSegmentKeypathForTask)(task, route, key);
(0, _cache.upsertSegmentEntry)(Date.now(), keypath, fulfilled);
}
}, noop);
}
// -----------------------------------------------------------------------------
// The remainder of the module is a MinHeap implementation. Try not to put any
// logic below here unless it's related to the heap algorithm. We can extract
// this to a separate module if/when we need multiple kinds of heaps.
// -----------------------------------------------------------------------------
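// The heap is an implicit binary tree stored in a plain array: the parent of
// index i lives at (i - 1) >>> 1 and its children at 2i + 1 and 2i + 2. Each
// task carries a `_heapIndex` back-pointer so arbitrary entries can be
// deleted or resifted in O(log n) without a linear search.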
function compareQueuePriority(a, b) {
// Since the queue is a MinHeap, this should return a positive number if b is
// higher priority than a, and a negative number if a is higher priority
// than b.
// `priority` is an integer, where higher numbers are higher priority.
const priorityDiff = b.priority - a.priority;
if (priorityDiff !== 0) {
return priorityDiff;
}
// If the priority is the same, check which phase the prefetch is in — is it
// prefetching the route tree, or the segments? Route trees are prioritized.
const phaseDiff = b.phase - a.phase;
if (phaseDiff !== 0) {
return phaseDiff;
}
// Finally, check the insertion order. `sortId` is an incrementing counter
// assigned to prefetches. We want to process the newest prefetches first.
return b.sortId - a.sortId;
}
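// Illustrative ordering: between two tasks with equal `priority`, the one in
// the route tree phase (phase 1) sorts ahead of one in the segment phase
// (phase 0); among full ties, the most recently scheduled task (the higher
// sortId) is processed first.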
function heapPush(heap, node) {
const index = heap.length;
heap.push(node);
node._heapIndex = index;
heapSiftUp(heap, node, index);
}
function heapPeek(heap) {
return heap.length === 0 ? null : heap[0];
}
function heapPop(heap) {
if (heap.length === 0) {
return null;
}
const first = heap[0];
first._heapIndex = -1;
const last = heap.pop();
if (last !== first) {
heap[0] = last;
last._heapIndex = 0;
heapSiftDown(heap, last, 0);
}
return first;
}
function heapDelete(heap, node) {
const index = node._heapIndex;
if (index !== -1) {
node._heapIndex = -1;
if (heap.length !== 0) {
const last = heap.pop();
if (last !== node) {
heap[index] = last;
last._heapIndex = index;
heapSiftDown(heap, last, index);
}
}
}
}
function heapResift(heap, node) {
const index = node._heapIndex;
if (index !== -1) {
if (index === 0) {
heapSiftDown(heap, node, 0);
} else {
const parentIndex = index - 1 >>> 1;
const parent = heap[parentIndex];
if (compareQueuePriority(parent, node) > 0) {
// The parent is larger. Sift up.
heapSiftUp(heap, node, index);
} else {
// The parent is smaller (or equal). Sift down.
heapSiftDown(heap, node, index);
}
}
}
}
function heapSiftUp(heap, node, i) {
let index = i;
while(index > 0){
const parentIndex = index - 1 >>> 1;
const parent = heap[parentIndex];
if (compareQueuePriority(parent, node) > 0) {
// The parent is larger. Swap positions.
heap[parentIndex] = node;
node._heapIndex = parentIndex;
heap[index] = parent;
parent._heapIndex = index;
index = parentIndex;
} else {
// The parent is smaller. Exit.
return;
}
}
}
function heapSiftDown(heap, node, i) {
let index = i;
const length = heap.length;
const halfLength = length >>> 1;
while(index < halfLength){
const leftIndex = (index + 1) * 2 - 1;
const left = heap[leftIndex];
const rightIndex = leftIndex + 1;
const right = heap[rightIndex];
// If the left or right node is smaller, swap with the smaller of those.
if (compareQueuePriority(left, node) < 0) {
if (rightIndex < length && compareQueuePriority(right, left) < 0) {
heap[index] = right;
right._heapIndex = index;
heap[rightIndex] = node;
node._heapIndex = rightIndex;
index = rightIndex;
} else {
heap[index] = left;
left._heapIndex = index;
heap[leftIndex] = node;
node._heapIndex = leftIndex;
index = leftIndex;
}
} else if (rightIndex < length && compareQueuePriority(right, node) < 0) {
heap[index] = right;
right._heapIndex = index;
heap[rightIndex] = node;
node._heapIndex = rightIndex;
index = rightIndex;
} else {
// Neither child is smaller. Exit.
return;
}
}
}
if ((typeof exports.default === 'function' || (typeof exports.default === 'object' && exports.default !== null)) && typeof exports.default.__esModule === 'undefined') {
Object.defineProperty(exports.default, '__esModule', { value: true });
Object.assign(exports.default, exports);
module.exports = exports.default;
}
//# sourceMappingURL=scheduler.js.map