// next — The React Framework
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); 0 && (module.exports = { cancelPrefetchTask: null, isPrefetchTaskDirty: null, pingPrefetchTask: null, reschedulePrefetchTask: null, schedulePrefetchTask: null, startRevalidationCooldown: null }); function _export(target, all) { for(var name in all)Object.defineProperty(target, name, { enumerable: true, get: all[name] }); } _export(exports, { cancelPrefetchTask: function() { return cancelPrefetchTask; }, isPrefetchTaskDirty: function() { return isPrefetchTaskDirty; }, pingPrefetchTask: function() { return pingPrefetchTask; }, reschedulePrefetchTask: function() { return reschedulePrefetchTask; }, schedulePrefetchTask: function() { return schedulePrefetchTask; }, startRevalidationCooldown: function() { return startRevalidationCooldown; } }); const _approutertypes = require("../../../shared/lib/app-router-types"); const _matchsegments = require("../match-segments"); const _cache = require("./cache"); const _varypath = require("./vary-path"); const _cachekey = require("./cache-key"); const _types = require("./types"); const _segment = require("../../../shared/lib/segment"); const scheduleMicrotask = typeof queueMicrotask === 'function' ? queueMicrotask : (fn)=>Promise.resolve().then(fn).catch((error)=>setTimeout(()=>{ throw error; })); const taskHeap = []; let inProgressRequests = 0; let sortIdCounter = 0; let didScheduleMicrotask = false; // The most recently hovered (or touched, etc) link, i.e. the most recent task // scheduled at Intent priority. There's only ever a single task at Intent // priority at a time. We reserve special network bandwidth for this task only. let mostRecentlyHoveredLink = null; // CDN cache propagation delay after revalidation (in milliseconds) const REVALIDATION_COOLDOWN_MS = 300; // Timeout handle for the revalidation cooldown. When non-null, prefetch // requests are blocked to allow CDN cache propagation. let revalidationCooldownTimeoutHandle = null; function startRevalidationCooldown() { // Clear any existing timeout in case multiple revalidations happen // in quick succession. if (revalidationCooldownTimeoutHandle !== null) { clearTimeout(revalidationCooldownTimeoutHandle); } // Schedule the cooldown to expire after the delay. revalidationCooldownTimeoutHandle = setTimeout(()=>{ revalidationCooldownTimeoutHandle = null; // Retry the prefetch queue now that the cooldown has expired. ensureWorkIsScheduled(); }, REVALIDATION_COOLDOWN_MS); } function schedulePrefetchTask(key, treeAtTimeOfPrefetch, fetchStrategy, priority, onInvalidate) { // Spawn a new prefetch task const task = { key, treeAtTimeOfPrefetch, cacheVersion: (0, _cache.getCurrentCacheVersion)(), priority, phase: 1, hasBackgroundWork: false, spawnedRuntimePrefetches: null, fetchStrategy, sortId: sortIdCounter++, isCanceled: false, onInvalidate, _heapIndex: -1 }; trackMostRecentlyHoveredLink(task); heapPush(taskHeap, task); // Schedule an async task to process the queue. // // The main reason we process the queue in an async task is for batching. // It's common for a single JS task/event to trigger multiple prefetches. // By deferring to a microtask, we only process the queue once per JS task. // If they have different priorities, it also ensures they are processed in // the optimal order. ensureWorkIsScheduled(); return task; } function cancelPrefetchTask(task) { // Remove the prefetch task from the queue. If the task already completed, // then this is a no-op. 
function schedulePrefetchTask(key, treeAtTimeOfPrefetch, fetchStrategy, priority, onInvalidate) {
    // Spawn a new prefetch task
    const task = {
        key,
        treeAtTimeOfPrefetch,
        cacheVersion: (0, _cache.getCurrentCacheVersion)(),
        priority,
        phase: 1,
        hasBackgroundWork: false,
        spawnedRuntimePrefetches: null,
        fetchStrategy,
        sortId: sortIdCounter++,
        isCanceled: false,
        onInvalidate,
        _heapIndex: -1
    };
    trackMostRecentlyHoveredLink(task);
    heapPush(taskHeap, task);
    // Schedule an async task to process the queue.
    //
    // The main reason we process the queue in an async task is for batching.
    // It's common for a single JS task/event to trigger multiple prefetches.
    // By deferring to a microtask, we only process the queue once per JS task.
    // If they have different priorities, it also ensures they are processed in
    // the optimal order.
    ensureWorkIsScheduled();
    return task;
}
function cancelPrefetchTask(task) {
    // Remove the prefetch task from the queue. If the task already completed,
    // then this is a no-op.
    //
    // We must also explicitly mark the task as canceled so that a blocked task
    // does not get added back to the queue when it's pinged by the network.
    task.isCanceled = true;
    heapDelete(taskHeap, task);
}
function reschedulePrefetchTask(task, treeAtTimeOfPrefetch, fetchStrategy, priority) {
    // Bump the prefetch task to the top of the queue, as if it were a fresh
    // task. This is essentially the same as canceling the task and scheduling
    // a new one, except it reuses the original object.
    //
    // The primary use case is to increase the priority of a Link-initiated
    // prefetch on hover.
    // Un-cancel the task, in case it was previously canceled.
    task.isCanceled = false;
    task.phase = 1;
    // Assign a new sort ID to move it ahead of all other tasks at the same
    // priority level. (Higher sort IDs are processed first.)
    task.sortId = sortIdCounter++;
    task.priority =
        // If this task is the most recently hovered link, maintain its
        // Intent priority, even if the rescheduled priority is lower.
        task === mostRecentlyHoveredLink ? _types.PrefetchPriority.Intent : priority;
    task.treeAtTimeOfPrefetch = treeAtTimeOfPrefetch;
    task.fetchStrategy = fetchStrategy;
    trackMostRecentlyHoveredLink(task);
    if (task._heapIndex !== -1) {
        // The task is already in the queue.
        heapResift(taskHeap, task);
    } else {
        heapPush(taskHeap, task);
    }
    ensureWorkIsScheduled();
}
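// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this module): the heap helpers used above
// (heapPush, heapPop, heapPeek, heapResift, heapDelete) are defined later in
// this file, beyond this excerpt. A minimal binary max-heap with `_heapIndex`
// backpointers, which make delete and resift O(log n), looks roughly like
// this. The comparator is an assumption based on the comments above: higher
// priority first, then higher sortId.
function exampleCompare(a, b) {
    return a.priority !== b.priority ? a.priority - b.priority : a.sortId - b.sortId;
}
function examplePush(heap, node) {
    node._heapIndex = heap.length;
    heap.push(node);
    exampleSiftUp(heap, node._heapIndex);
}
function examplePeek(heap) {
    return heap.length === 0 ? null : heap[0];
}
function exampleDelete(heap, node) {
    const i = node._heapIndex;
    if (i === -1) return; // Not in the heap; no-op.
    node._heapIndex = -1;
    const last = heap.pop();
    if (last !== node) {
        // Move the last node into the hole, then restore the heap property.
        heap[i] = last;
        last._heapIndex = i;
        exampleSiftUp(heap, i);
        exampleSiftDown(heap, last._heapIndex);
    }
}
function exampleSiftUp(heap, i) {
    while (i > 0) {
        const parent = (i - 1) >> 1;
        if (exampleCompare(heap[i], heap[parent]) <= 0) break;
        [heap[i], heap[parent]] = [heap[parent], heap[i]];
        heap[i]._heapIndex = i;
        heap[parent]._heapIndex = parent;
        i = parent;
    }
}
function exampleSiftDown(heap, i) {
    for (;;) {
        let max = i;
        const l = 2 * i + 1;
        const r = 2 * i + 2;
        if (l < heap.length && exampleCompare(heap[l], heap[max]) > 0) max = l;
        if (r < heap.length && exampleCompare(heap[r], heap[max]) > 0) max = r;
        if (max === i) return;
        [heap[i], heap[max]] = [heap[max], heap[i]];
        heap[i]._heapIndex = i;
        heap[max]._heapIndex = max;
        i = max;
    }
}
// ---------------------------------------------------------------------------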
function isPrefetchTaskDirty(task, nextUrl, tree) {
    // This is used to quickly bail out of a prefetch task if the result is
    // guaranteed to not have changed since the task was initiated. This is
    // strictly an optimization — theoretically, if it always returned true, no
    // behavior should change because a full prefetch task will effectively
    // perform the same checks.
    const currentCacheVersion = (0, _cache.getCurrentCacheVersion)();
    return task.cacheVersion !== currentCacheVersion ||
        task.treeAtTimeOfPrefetch !== tree ||
        task.key.nextUrl !== nextUrl;
}
function trackMostRecentlyHoveredLink(task) {
    // Track the most recently hovered link, i.e. the most recently scheduled
    // task at Intent priority. There must only be one such task at a time.
    if (task.priority === _types.PrefetchPriority.Intent && task !== mostRecentlyHoveredLink) {
        if (mostRecentlyHoveredLink !== null) {
            // Bump the previously hovered link's priority down to Default.
            if (mostRecentlyHoveredLink.priority !== _types.PrefetchPriority.Background) {
                mostRecentlyHoveredLink.priority = _types.PrefetchPriority.Default;
                heapResift(taskHeap, mostRecentlyHoveredLink);
            }
        }
        mostRecentlyHoveredLink = task;
    }
}
function ensureWorkIsScheduled() {
    if (didScheduleMicrotask) {
        // Already scheduled a task to process the queue
        return;
    }
    didScheduleMicrotask = true;
    scheduleMicrotask(processQueueInMicrotask);
}
/**
 * Checks if we've exceeded the maximum number of concurrent prefetch requests,
 * to avoid saturating the browser's internal network queue. This is a
 * cooperative limit — prefetch tasks should check this before issuing
 * new requests.
 *
 * Also checks if we're within the revalidation cooldown window, during which
 * prefetch requests are delayed to allow CDN cache propagation.
 */
function hasNetworkBandwidth(task) {
    // Check if we're within the revalidation cooldown window
    if (revalidationCooldownTimeoutHandle !== null) {
        // We're within the cooldown window. Return false to prevent prefetching.
        // When the cooldown expires, the timeout will call ensureWorkIsScheduled()
        // to retry the queue.
        return false;
    }
    // TODO: Also check if there's an in-progress navigation. We should never
    // add prefetch requests to the network queue if an actual navigation is
    // taking place, to ensure there's sufficient bandwidth for render-blocking
    // data and resources.
    // TODO: Consider reserving some amount of bandwidth for static prefetches.
    if (task.priority === _types.PrefetchPriority.Intent) {
        // The most recently hovered link is allowed to exceed the default limit.
        //
        // The goal is to always have enough bandwidth to start a new prefetch
        // request when hovering over a link.
        //
        // However, because we don't abort in-progress requests, it's still possible
        // we'll run out of bandwidth. When links are hovered in quick succession,
        // there could be multiple hover requests running simultaneously.
        return inProgressRequests < 12;
    }
    // The default limit is lower than the limit for a hovered link.
    return inProgressRequests < 4;
}
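// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this module): the cooperative-limit
// pattern above in isolation. Work is only pulled from a queue while the
// in-flight count is under budget; each completed request frees a slot and
// re-runs the loop, mirroring onPrefetchConnectionClosed() below. The names
// (`exampleDrain`, `budget`) are hypothetical, and `inFlight` is an object so
// the count survives across calls.
function exampleDrain(queue, inFlight, budget, startRequest) {
    while (queue.length > 0 && inFlight.count < budget) {
        const job = queue.shift();
        inFlight.count++;
        startRequest(job).finally(() => {
            inFlight.count--;
            // Freed bandwidth re-enters the loop.
            exampleDrain(queue, inFlight, budget, startRequest);
        });
    }
}
// ---------------------------------------------------------------------------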
function spawnPrefetchSubtask(prefetchSubtask) {
    // When the scheduler spawns an async task, we don't await its result.
    // Instead, the async task writes its result directly into the cache, then
    // pings the scheduler to continue.
    //
    // We process server responses as they stream in, so the prefetch subtask
    // will likely resolve before we're finished receiving all the data. The
    // subtask result includes a promise that resolves once the network
    // connection is closed. The scheduler uses this to control network
    // bandwidth by tracking and limiting the number of concurrent requests.
    inProgressRequests++;
    return prefetchSubtask.then((result) => {
        if (result === null) {
            // The prefetch task errored before it could start processing the
            // network stream. Assume the connection is closed.
            onPrefetchConnectionClosed();
            return null;
        }
        // Wait for the connection to close before freeing up more bandwidth.
        result.closed.then(onPrefetchConnectionClosed);
        return result.value;
    });
}
function onPrefetchConnectionClosed() {
    inProgressRequests--;
    // Notify the scheduler that we have more bandwidth, and can continue
    // processing tasks.
    ensureWorkIsScheduled();
}
function pingPrefetchTask(task) {
    // "Ping" a prefetch that's already in progress to notify it of new data.
    if (
        // Check if prefetch was canceled.
        task.isCanceled ||
        // Check if prefetch is already queued.
        task._heapIndex !== -1
    ) {
        return;
    }
    // Add the task back to the queue.
    heapPush(taskHeap, task);
    ensureWorkIsScheduled();
}
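// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this module): the `{ value, closed }`
// shape consumed by spawnPrefetchSubtask above. The subtask resolves as soon
// as response headers arrive, while `closed` settles only once the body
// stream has been fully drained, which is what drives the bandwidth
// accounting. `exampleFetchSubtask` and its URL parameter are hypothetical.
async function exampleFetchSubtask(url) {
    const response = await fetch(url);
    if (!response.ok || response.body === null) {
        // Mirrors the `result === null` branch: treat as already closed.
        return null;
    }
    const reader = response.body.getReader();
    const closed = (async () => {
        // Drain the stream; resolves when the connection is done.
        while (!(await reader.read()).done) {}
    })().catch(() => {
        // Treat stream errors as a closed connection for bandwidth purposes.
    });
    return { value: response, closed };
}
// ---------------------------------------------------------------------------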
function processQueueInMicrotask() {
    didScheduleMicrotask = false;
    // We aim to minimize how often we read the current time. Since nearly all
    // functions in the prefetch scheduler are synchronous, we can read the time
    // once and pass it as an argument wherever it's needed.
    const now = Date.now();
    // Process the task queue until we run out of network bandwidth.
    let task = heapPeek(taskHeap);
    while (task !== null && hasNetworkBandwidth(task)) {
        task.cacheVersion = (0, _cache.getCurrentCacheVersion)();
        const exitStatus = pingRoute(now, task);
        // These fields are only valid for a single attempt. Reset them after each
        // iteration of the task queue.
        const hasBackgroundWork = task.hasBackgroundWork;
        task.hasBackgroundWork = false;
        task.spawnedRuntimePrefetches = null;
        switch (exitStatus) {
            case 0:
                // The task yielded because there are too many requests in progress.
                // Stop processing tasks until we have more bandwidth.
                return;
            case 1:
                // The task is blocked. It needs more data before it can proceed.
                // Keep the task out of the queue until the server responds.
                heapPop(taskHeap);
                // Continue to the next task
                task = heapPeek(taskHeap);
                continue;
            case 2:
                if (task.phase === 1) {
                    // Finished prefetching the route tree. Proceed to prefetching
                    // the segments.
                    task.phase = 0;
                    heapResift(taskHeap, task);
                } else if (hasBackgroundWork) {
                    // The task spawned additional background work. Reschedule the task
                    // at background priority.
                    task.priority = _types.PrefetchPriority.Background;
                    heapResift(taskHeap, task);
                } else {
                    // The prefetch is complete. Continue to the next task.
                    heapPop(taskHeap);
                }
                task = heapPeek(taskHeap);
                continue;
            default:
                exitStatus;
        }
    }
}
/**
 * Check this during a prefetch task to determine if background work can be
 * performed. If so, it evaluates to `true`. Otherwise, it returns `false`,
 * while also scheduling a background task to run later. Usage:
 *
 * @example
 * if (background(task)) {
 *   // Perform background-priority work
 * }
 */
function background(task) {
    if (task.priority === _types.PrefetchPriority.Background) {
        return true;
    }
    task.hasBackgroundWork = true;
    return false;
}
function pingRoute(now, task) {
    const key = task.key;
    const route = (0, _cache.readOrCreateRouteCacheEntry)(now, task, key);
    const exitStatus = pingRootRouteTree(now, task, route);
    if (exitStatus !== 0 && key.search !== '') {
        // If the URL has a non-empty search string, also prefetch the pathname
        // without the search string. We use the searchless route tree as a base for
        // optimistic routing; see requestOptimisticRouteCacheEntry for details.
        //
        // Note that we don't need to prefetch any of the segment data. Just the
        // route tree.
        //
        // TODO: This is a temporary solution; the plan is to replace this by adding
        // a wildcard lookup method to the TupleMap implementation. This is
        // non-trivial to implement because it needs to account for things like
        // fallback route entries, hence this temporary workaround.
        const url = new URL(key.pathname, location.origin);
        const keyWithoutSearch = (0, _cachekey.createCacheKey)(url.href, key.nextUrl);
        const routeWithoutSearch = (0, _cache.readOrCreateRouteCacheEntry)(now, task, keyWithoutSearch);
        switch (routeWithoutSearch.status) {
            case _cache.EntryStatus.Empty: {
                if (background(task)) {
                    routeWithoutSearch.status = _cache.EntryStatus.Pending;
                    spawnPrefetchSubtask((0, _cache.fetchRouteOnCacheMiss)(routeWithoutSearch, task, keyWithoutSearch));
                }
                break;
            }
            case _cache.EntryStatus.Pending:
            case _cache.EntryStatus.Fulfilled:
            case _cache.EntryStatus.Rejected: {
                break;
            }
            default:
                routeWithoutSearch;
        }
    }
    return exitStatus;
}
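// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this module): the bare numeric literals in
// the switch above are almost certainly a compiled-away TypeScript enum.
// Based on the surrounding comments, the values line up as follows (the names
// here are hypothetical):
const ExamplePrefetchTaskExitStatus = {
    OutOfBandwidth: 0, // Yielded: too many requests in flight; retry when one closes.
    Blocked: 1,        // Waiting on the server; re-queued via pingPrefetchTask.
    Done: 2            // This pass finished (route phase, segments, or whole task).
};
// Similarly, `task.phase` appears to be 1 while fetching the route tree and
// 0 once the task has moved on to prefetching individual segments.
// ---------------------------------------------------------------------------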
function pingRootRouteTree(now, task, route) {
    switch (route.status) {
        case _cache.EntryStatus.Empty: {
            // Route is not yet cached, and there's no request already in progress.
            // Spawn a task to request the route, load it into the cache, and ping
            // the task to continue.
            // TODO: There are multiple strategies in the <Link> API for prefetching
            // a route. Currently we've only implemented the main one: per-segment,
            // static-data only.
            //
            // There's also `<Link prefetch={true}>`, which prefetches both static
            // *and* dynamic data. Similarly, we need to fall back to the old,
            // per-page behavior if PPR is disabled for a route (via the
            // incremental opt-in).
            //
            // Those cases will be handled here.
            spawnPrefetchSubtask((0, _cache.fetchRouteOnCacheMiss)(route, task, task.key));
            // If the request takes longer than a minute, a subsequent request should
            // retry instead of waiting for this one. When the response is received,
            // this value will be replaced by a new value based on the stale time sent
            // from the server.
            // TODO: We should probably also manually abort the fetch task, to
            // reclaim server bandwidth.
            route.staleAt = now + 60 * 1000;
            // Upgrade to Pending so we know there's already a request in progress
            route.status = _cache.EntryStatus.Pending;
            // Intentional fallthrough to the Pending branch
        }
        case _cache.EntryStatus.Pending: {
            // Still pending. We can't start prefetching the segments until the route
            // tree has loaded. Add the task to the set of blocked tasks so that it
            // is notified when the route tree is ready.
            const blockedTasks = route.blockedTasks;
            if (blockedTasks === null) {
                route.blockedTasks = new Set([task]);
            } else {
                blockedTasks.add(task);
            }
            return 1;
        }
        case _cache.EntryStatus.Rejected: {
            // Route tree failed to load. Treat as a 404.
            return 2;
        }
        case _cache.EntryStatus.Fulfilled: {
            if (task.phase !== 0) {
                // Do not prefetch segment data until we've entered the segment phase.
                return 2;
            }
            // Recursively fill in the segment tree.
            if (!hasNetworkBandwidth(task)) {
                // Stop prefetching segments until there's more bandwidth.
                return 0;
            }
            const tree = route.tree;
            // A task's fetch strategy gets set to `PPR` for any "auto" prefetch.
            // If it turned out that the route isn't PPR-enabled, we need to use
            // `LoadingBoundary` instead. We don't need to do this for runtime
            // prefetches, because those are only available in `cacheComponents`,
            // where every route is PPR.
            const fetchStrategy = task.fetchStrategy === _types.FetchStrategy.PPR
                ? route.isPPREnabled
                    ? _types.FetchStrategy.PPR
                    : _types.FetchStrategy.LoadingBoundary
                : task.fetchStrategy;
            switch (fetchStrategy) {
                case _types.FetchStrategy.PPR: {
                    // For Cache Components pages, each segment may be prefetched
                    // statically or using a runtime request, based on various
                    // configurations and heuristics. We'll do this in two passes: first
                    // traverse the tree and perform all the static prefetches.
                    //
                    // Then, if there are any segments that need a runtime request,
                    // do another pass to perform a runtime prefetch.
                    pingStaticHead(now, task, route);
                    const exitStatus = pingSharedPartOfCacheComponentsTree(now, task, route, task.treeAtTimeOfPrefetch, tree);
                    if (exitStatus === 0) {
                        // Child yielded without finishing.
                        return 0;
                    }
                    const spawnedRuntimePrefetches = task.spawnedRuntimePrefetches;
                    if (spawnedRuntimePrefetches !== null) {
                        // During the first pass, we discovered segments that require a
                        // runtime prefetch. Do a second pass to construct a request tree.
                        const spawnedEntries = new Map();
                        pingRuntimeHead(now, task, route, spawnedEntries, _types.FetchStrategy.PPRRuntime);
                        const requestTree = pingRuntimePrefetches(now, task, route, tree, spawnedRuntimePrefetches, spawnedEntries);
                        let needsDynamicRequest = spawnedEntries.size > 0;
                        if (needsDynamicRequest) {
                            // Perform a dynamic prefetch request and populate the cache
                            // with the result.
                            spawnPrefetchSubtask((0, _cache.fetchSegmentPrefetchesUsingDynamicRequest)(task, route, _types.FetchStrategy.PPRRuntime, requestTree, spawnedEntries));
                        }
                    }
                    return 2;
                }
                case _types.FetchStrategy.Full:
                case _types.FetchStrategy.PPRRuntime:
                case _types.FetchStrategy.LoadingBoundary: {
                    // Prefetch multiple segments using a single dynamic request.
                    // TODO: We can consolidate this branch with the previous one by
                    // modeling it as if the first segment in the new tree has runtime
                    // prefetching enabled. Will do this as a follow-up refactor. Might
                    // want to remove the special metadata case below first. In the
                    // meantime, it's not really that much duplication, just would be
                    // nice to remove one of these codepaths.
                    const spawnedEntries = new Map();
                    pingRuntimeHead(now, task, route, spawnedEntries, fetchStrategy);
                    const dynamicRequestTree = diffRouteTreeAgainstCurrent(now, task, route, task.treeAtTimeOfPrefetch, tree, spawnedEntries, fetchStrategy);
                    let needsDynamicRequest = spawnedEntries.size > 0;
                    if (needsDynamicRequest) {
                        spawnPrefetchSubtask((0, _cache.fetchSegmentPrefetchesUsingDynamicRequest)(task, route, fetchStrategy, dynamicRequestTree, spawnedEntries));
                    }
                    return 2;
                }
                default:
                    fetchStrategy;
            }
            break;
        }
        default: {
            route;
        }
    }
    return 2;
}
function pingStaticHead(now, task, route) {
    // The Head data for a page (metadata, viewport) is not really a route
    // segment, in the sense that it doesn't appear in the route tree. But we
    // store it in the cache as if it were, using a special key.
    pingStaticSegmentData(now, task, route, (0, _cache.readOrCreateSegmentCacheEntry)(now, _types.FetchStrategy.PPR, route, route.metadata), task.key, route.metadata);
}
function pingRuntimeHead(now, task, route, spawnedEntries, fetchStrategy) {
    pingRouteTreeAndIncludeDynamicData(now, task, route, route.metadata, false, spawnedEntries,
        // When prefetching the head, there's no difference between Full
        // and LoadingBoundary
        fetchStrategy === _types.FetchStrategy.LoadingBoundary ? _types.FetchStrategy.Full : fetchStrategy);
}
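// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this module): the strategy downgrade in
// pingRootRouteTree as a standalone function. An "auto" prefetch is scheduled
// optimistically as PPR, then falls back to LoadingBoundary once the route
// tree reveals that PPR is not enabled for the route.
// `exampleResolveFetchStrategy` is hypothetical.
function exampleResolveFetchStrategy(taskFetchStrategy, isPPREnabled) {
    return taskFetchStrategy === _types.FetchStrategy.PPR
        ? isPPREnabled
            ? _types.FetchStrategy.PPR
            : _types.FetchStrategy.LoadingBoundary
        : taskFetchStrategy;
}
// ---------------------------------------------------------------------------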
// TODO: Rename dynamic -> runtime throughout this module
function pingSharedPartOfCacheComponentsTree(now, task, route, oldTree, newTree) {
    // When Cache Components is enabled (or PPR, or a fully static route when PPR
    // is disabled; those cases are treated equivalently to Cache Components), we
    // start by prefetching each segment individually. Once we reach the "new"
    // part of the tree — the part that doesn't exist on the current page — we
    // may choose to switch to a runtime prefetch instead, based on the
    // information sent by the server in the route tree.
    //
    // The traversal starts in the "shared" part of the tree. Once we reach the
    // "new" part of the tree, we switch to a different traversal,
    // pingNewPartOfCacheComponentsTree.
    // Prefetch this segment's static data.
    const segment = (0, _cache.readOrCreateSegmentCacheEntry)(now, task.fetchStrategy, route, newTree);
    pingStaticSegmentData(now, task, route, segment, task.key, newTree);
    // Recursively ping the children.
    const oldTreeChildren = oldTree[1];
    const newTreeChildren = newTree.slots;
    if (newTreeChildren !== null) {
        for (const parallelRouteKey in newTreeChildren) {
            if (!hasNetworkBandwidth(task)) {
                // Stop prefetching segments until there's more bandwidth.
                return 0;
            }
            const newTreeChild = newTreeChildren[parallelRouteKey];
            const newTreeChildSegment = newTreeChild.segment;
            const oldTreeChild = oldTreeChildren[parallelRouteKey];
            const oldTreeChildSegment = oldTreeChild?.[0];
            let childExitStatus;
            if (oldTreeChildSegment !== undefined && doesCurrentSegmentMatchCachedSegment(route, newTreeChildSegment, oldTreeChildSegment)) {
                // We're still in the "shared" part of the tree.
                childExitStatus = pingSharedPartOfCacheComponentsTree(now, task, route, oldTreeChild, newTreeChild);
            } else {
                // We've entered the "new" part of the tree. Switch
                // traversal functions.
                childExitStatus = pingNewPartOfCacheComponentsTree(now, task, route, newTreeChild);
            }
            if (childExitStatus === 0) {
                // Child yielded without finishing.
                return 0;
            }
        }
    }
    return 2;
}
function pingNewPartOfCacheComponentsTree(now, task, route, tree) {
    // We're now prefetching in the "new" part of the tree, the part that doesn't
    // exist on the current page. (In other words, we're deeper than the shared
    // layouts.) Segments in here default to being prefetched statically.
    // However, if the server instructs us to, we may switch to a runtime
    // prefetch instead. Traverse the tree and check at each segment.
    if (tree.hasRuntimePrefetch) {
        // This route has a runtime prefetch response. Since we're below the shared
        // layout, everything from this point should be prefetched using a single,
        // combined runtime request, rather than using per-segment static requests.
        // This is true even if some of the child segments are known to be fully
        // static — once we've decided to perform a runtime prefetch, we might as
        // well respond with the static segments in the same roundtrip. (That's how
        // regular navigations work, too.) We'll still skip over segments that are
        // already cached, though.
        //
        // It's the server's responsibility to set a reasonable value of
        // `hasRuntimePrefetch`. Currently it's user-defined, but eventually, the
        // server may send a value of `false` even if the user opts in, if it
        // determines during build that the route is always fully static. There are
        // more optimizations we can do once we implement fallback param
        // tracking, too.
        //
        // Use the task object to collect the segments that need a runtime prefetch.
        // This will signal to the outer task queue that a second traversal is
        // required to construct a request tree.
        if (task.spawnedRuntimePrefetches === null) {
            task.spawnedRuntimePrefetches = new Set([tree.requestKey]);
        } else {
            task.spawnedRuntimePrefetches.add(tree.requestKey);
        }
        // Then exit the traversal without prefetching anything further.
        return 2;
    }
    // This segment should not be runtime prefetched. Prefetch its static data.
    const segment = (0, _cache.readOrCreateSegmentCacheEntry)(now, task.fetchStrategy, route, tree);
    pingStaticSegmentData(now, task, route, segment, task.key, tree);
    if (tree.slots !== null) {
        if (!hasNetworkBandwidth(task)) {
            // Stop prefetching segments until there's more bandwidth.
            return 0;
        }
        // Recursively ping the children.
        for (const parallelRouteKey in tree.slots) {
            const childTree = tree.slots[parallelRouteKey];
            const childExitStatus = pingNewPartOfCacheComponentsTree(now, task, route, childTree);
            if (childExitStatus === 0) {
                // Child yielded without finishing.
                return 0;
            }
        }
    }
    // This segment and all its children have finished prefetching.
    return 2;
}
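// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this module): the two traversals above
// follow a common pattern. Walk two trees in lockstep while their segments
// match, and switch to a different visitor at the first divergence. Reduced
// to its skeleton (names hypothetical):
function exampleWalkShared(oldNode, newNode, visitShared, visitNew) {
    visitShared(newNode);
    for (const key in newNode.children) {
        const oldChild = oldNode?.children?.[key];
        const newChild = newNode.children[key];
        if (oldChild !== undefined && oldChild.segment === newChild.segment) {
            // Still in the shared part of the tree.
            exampleWalkShared(oldChild, newChild, visitShared, visitNew);
        } else {
            // Entered the new part of the tree; switch visitors.
            visitNew(newChild);
        }
    }
}
// ---------------------------------------------------------------------------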
function diffRouteTreeAgainstCurrent(now, task, route, oldTree, newTree, spawnedEntries, fetchStrategy) {
    // This is a single recursive traversal that does multiple things:
    // - Finds the parts of the target route (newTree) that are not part of
    //   the current page (oldTree) by diffing them, using the same algorithm
    //   as a real navigation.
    // - Constructs a request tree (FlightRouterState) that describes which
    //   segments need to be prefetched and which ones are already cached.
    // - Creates a set of pending cache entries for the segments that need to
    //   be prefetched, so that a subsequent prefetch task does not request the
    //   same segments again.
    const oldTreeChildren = oldTree[1];
    const newTreeChildren = newTree.slots;
    let requestTreeChildren = {};
    if (newTreeChildren !== null) {
        for (const parallelRouteKey in newTreeChildren) {
            const newTreeChild = newTreeChildren[parallelRouteKey];
            const newTreeChildSegment = newTreeChild.segment;
            const oldTreeChild = oldTreeChildren[parallelRouteKey];
            const oldTreeChildSegment = oldTreeChild?.[0];
            if (oldTreeChildSegment !== undefined && doesCurrentSegmentMatchCachedSegment(route, newTreeChildSegment, oldTreeChildSegment)) {
                // This segment is already part of the current route. Keep traversing.
                const requestTreeChild = diffRouteTreeAgainstCurrent(now, task, route, oldTreeChild, newTreeChild, spawnedEntries, fetchStrategy);
                requestTreeChildren[parallelRouteKey] = requestTreeChild;
            } else {
                // This segment is not part of the current route. We're entering a
                // part of the tree that we need to prefetch (unless everything is
                // already cached).
                switch (fetchStrategy) {
                    case _types.FetchStrategy.LoadingBoundary: {
                        // When PPR is disabled, we can't prefetch per segment. We must
                        // fall back to the old prefetch behavior and send a dynamic
                        // request. Only routes that include a loading boundary can be
                        // prefetched in this way.
                        //
                        // This is similar to a "full" prefetch, but we're much more
                        // conservative about which segments to include in the request.
                        //
                        // The server will only render up to the first loading boundary
                        // inside the new part of the tree. If there's no loading boundary
                        // anywhere in the tree, the server will never return any data, so
                        // we can skip the request.
                        const subtreeHasLoadingBoundary = newTreeChild.hasLoadingBoundary !== _approutertypes.HasLoadingBoundary.SubtreeHasNoLoadingBoundary;
                        const requestTreeChild = subtreeHasLoadingBoundary
                            ? pingPPRDisabledRouteTreeUpToLoadingBoundary(now, task, route, newTreeChild, null, spawnedEntries)
                            : (0, _cache.convertRouteTreeToFlightRouterState)(newTreeChild);
                        requestTreeChildren[parallelRouteKey] = requestTreeChild;
                        break;
                    }
                    case _types.FetchStrategy.PPRRuntime: {
                        // This is a runtime prefetch. Fetch all cacheable data in the
                        // tree, not just the static PPR shell.
                        const requestTreeChild = pingRouteTreeAndIncludeDynamicData(now, task, route, newTreeChild, false, spawnedEntries, fetchStrategy);
                        requestTreeChildren[parallelRouteKey] = requestTreeChild;
                        break;
                    }
                    case _types.FetchStrategy.Full: {
                        // This is a "full" prefetch. Fetch all the data in the tree, both
                        // static and dynamic. We issue roughly the same request that we
                        // would during a real navigation. The goal is that once the
                        // navigation occurs, the router should not have to fetch any
                        // additional data.
                        //
                        // Although the response will include dynamic data, opting into a
                        // Full prefetch — via <Link prefetch={true}> — implicitly
                        // instructs the cache to treat the response as "static", or non-
                        // dynamic, since the whole point is to cache it for
                        // future navigations.
                        //
                        // Construct a tree (currently a FlightRouterState) that represents
                        // which segments need to be prefetched and which ones are already
                        // cached. If the tree is empty, then we can exit. Otherwise, we'll
                        // send the request tree to the server and use the response to
                        // populate the segment cache.
                        const requestTreeChild = pingRouteTreeAndIncludeDynamicData(now, task, route, newTreeChild, false, spawnedEntries, fetchStrategy);
                        requestTreeChildren[parallelRouteKey] = requestTreeChild;
                        break;
                    }
                    default:
                        fetchStrategy;
                }
            }
        }
    }
    const requestTree = [
        newTree.segment,
        requestTreeChildren,
        null,
        null,
        newTree.isRootLayout
    ];
    return requestTree;
}
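// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this module): the request trees built in
// this file are FlightRouterState tuples with the shape
// [segment, parallelRoutes, url, refresh marker, isRootLayout]. The concrete
// values below are made up; 'refetch' and 'inside-shared-layout' are the
// markers this module actually sends.
const exampleRequestTree = [
    '',                           // root segment
    {
        children: [
            'dashboard',          // already cached; traversal keeps going
            {
                children: [
                    'settings',   // first uncached segment on this path
                    {},
                    null,
                    'refetch'     // tells the server to start rendering here
                ]
            },
            null,
            null
        ]
    },
    null,                         // url (unused for prefetch requests)
    null,                         // no marker at the root
    true                          // isRootLayout
];
// ---------------------------------------------------------------------------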
function pingPPRDisabledRouteTreeUpToLoadingBoundary(now, task, route, tree, refetchMarkerContext, spawnedEntries) {
    // This function is similar to pingRouteTreeAndIncludeDynamicData, except the
    // server is only going to return a minimal loading state — it will stop
    // rendering at the first loading boundary. Whereas a Full prefetch is
    // intentionally aggressive and tries to prefetch all the data that will be
    // needed for a navigation, a LoadingBoundary prefetch is much more
    // conservative. For example, it will omit from the request tree any segment
    // that is already cached, regardless of whether it's partial or full. By
    // contrast, a Full prefetch will refetch partial segments.
    // "inside-shared-layout" tells the server where to start looking for a
    // loading boundary.
    let refetchMarker = refetchMarkerContext === null ? 'inside-shared-layout' : null;
    const segment = (0, _cache.readOrCreateSegmentCacheEntry)(now, task.fetchStrategy, route, tree);
    switch (segment.status) {
        case _cache.EntryStatus.Empty: {
            // This segment is not cached. Add a refetch marker so the server knows
            // to start rendering here.
            // TODO: Instead of a "refetch" marker, we could just omit this subtree's
            // FlightRouterState from the request tree. I think this would probably
            // already work even without any updates to the server. For consistency,
            // though, I'll send the full tree and we'll look into this later as part
            // of a larger redesign of the request protocol.
            // Add the pending cache entry to the result map.
            spawnedEntries.set(tree.requestKey, (0, _cache.upgradeToPendingSegment)(segment,
                // Set the fetch strategy to LoadingBoundary to indicate that the
                // server might not include it in the pending response. If another
                // route is able to issue a per-segment request, we'll do that in
                // the background.
                _types.FetchStrategy.LoadingBoundary));
            if (refetchMarkerContext !== 'refetch') {
                refetchMarker = refetchMarkerContext = 'refetch';
            } else {
                // There's already a parent with a refetch marker, so we don't need
                // to add another one.
            }
            break;
        }
        case _cache.EntryStatus.Fulfilled: {
            // The segment is already cached.
            const segmentHasLoadingBoundary = tree.hasLoadingBoundary === _approutertypes.HasLoadingBoundary.SegmentHasLoadingBoundary;
            if (segmentHasLoadingBoundary) {
                // This segment has a loading boundary, which means the server won't
                // render its children. So there's nothing left to prefetch along this
                // path. We can bail out.
                return (0, _cache.convertRouteTreeToFlightRouterState)(tree);
            }
            break;
        }
        case _cache.EntryStatus.Pending: {
            break;
        }
        case _cache.EntryStatus.Rejected: {
            break;
        }
        default:
            segment;
    }
    const requestTreeChildren = {};
    if (tree.slots !== null) {
        for (const parallelRouteKey in tree.slots) {
            const childTree = tree.slots[parallelRouteKey];
            requestTreeChildren[parallelRouteKey] = pingPPRDisabledRouteTreeUpToLoadingBoundary(now, task, route, childTree, refetchMarkerContext, spawnedEntries);
        }
    }
    const requestTree = [
        tree.segment,
        requestTreeChildren,
        null,
        refetchMarker,
        tree.isRootLayout
    ];
    return requestTree;
}
function pingRouteTreeAndIncludeDynamicData(now, task, route, tree, isInsideRefetchingParent, spawnedEntries, fetchStrategy) {
    // The tree we're constructing is the same shape as the tree we're navigating
    // to. But even though this is a "new" tree, some of the individual segments
    // may be cached as a result of other route prefetches.
    //
    // So we need to find the first uncached segment along each path and add an
    // explicit "refetch" marker so the server knows where to start rendering.
    // Once the server starts rendering along a path, it keeps rendering the
    // entire subtree.
    const segment = (0, _cache.readOrCreateSegmentCacheEntry)(now,
        // Note that `fetchStrategy` might be different from `task.fetchStrategy`,
        // and we have to use the former here. We can have a task with
        // `FetchStrategy.PPR` where some of its segments are configured to
        // always use runtime prefetching (via `export const prefetch`), and
        // those should check for entries that include search params.
        fetchStrategy, route, tree);
    let spawnedSegment = null;
    switch (segment.status) {
        case _cache.EntryStatus.Empty: {
            // This segment is not cached. Include it in the request.
            spawnedSegment = (0, _cache.upgradeToPendingSegment)(segment, fetchStrategy);
            break;
        }
        case _cache.EntryStatus.Fulfilled: {
            // The segment is already cached.
            if (segment.isPartial && (0, _cache.canNewFetchStrategyProvideMoreContent)(segment.fetchStrategy, fetchStrategy)) {
                // The cached segment contains dynamic holes, and was prefetched using
                // a less specific strategy than the current one. This means we're in
                // one of these cases:
                // - we have a static prefetch, and we're doing a runtime prefetch
                // - we have a static or runtime prefetch, and we're doing a Full
                //   prefetch (or a navigation).
                // In either case, we need to include it in the request to get a more
                // specific (or full) version.
                spawnedSegment = pingFullSegmentRevalidation(now, route, tree, fetchStrategy);
            }
            break;
        }
        case _cache.EntryStatus.Pending:
        case _cache.EntryStatus.Rejected: {
            // There's either another prefetch currently in progress, or the previous
            // attempt failed. If the new strategy can provide more content, fetch it
            // again.
            if ((0, _cache.canNewFetchStrategyProvideMoreContent)(segment.fetchStrategy, fetchStrategy)) {
                spawnedSegment = pingFullSegmentRevalidation(now, route, tree, fetchStrategy);
            }
            break;
        }
        default:
            segment;
    }
    const requestTreeChildren = {};
    if (tree.slots !== null) {
        for (const parallelRouteKey in tree.slots) {
            const childTree = tree.slots[parallelRouteKey];
            requestTreeChildren[parallelRouteKey] = pingRouteTreeAndIncludeDynamicData(now, task, route, childTree, isInsideRefetchingParent || spawnedSegment !== null, spawnedEntries, fetchStrategy);
        }
    }
    if (spawnedSegment !== null) {
        // Add the pending entry to the result map.
        spawnedEntries.set(tree.requestKey, spawnedSegment);
    }
    // Don't bother to add a refetch marker if one is already present in a parent.
    const refetchMarker = !isInsideRefetchingParent && spawnedSegment !== null ? 'refetch' : null;
    const requestTree = [
        tree.segment,
        requestTreeChildren,
        null,
        refetchMarker,
        tree.isRootLayout
    ];
    return requestTree;
}
function pingRuntimePrefetches(now, task, route, tree, spawnedRuntimePrefetches, spawnedEntries) {
    // Construct a request tree (FlightRouterState) for a runtime prefetch. If
    // a segment is part of the runtime prefetch, the tree is constructed by
    // diffing against what's already in the prefetch cache. Otherwise, we send
    // a regular FlightRouterState with no special markers.
    //
    // See pingRouteTreeAndIncludeDynamicData for details.
    if (spawnedRuntimePrefetches.has(tree.requestKey)) {
        // This segment needs a runtime prefetch.
        return pingRouteTreeAndIncludeDynamicData(now, task, route, tree, false, spawnedEntries, _types.FetchStrategy.PPRRuntime);
    }
    let requestTreeChildren = {};
    const slots = tree.slots;
    if (slots !== null) {
        for (const parallelRouteKey in slots) {
            const childTree = slots[parallelRouteKey];
            requestTreeChildren[parallelRouteKey] = pingRuntimePrefetches(now, task, route, childTree, spawnedRuntimePrefetches, spawnedEntries);
        }
    }
    // This segment is not part of the runtime prefetch. Clone the base tree.
    const requestTree = [
        tree.segment,
        requestTreeChildren,
        null,
        null
    ];
    return requestTree;
}
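// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this module): how the
// `isInsideRefetchingParent` flag above keeps refetch markers sparse. Only
// the shallowest uncached segment along each path is marked, since the server
// renders the entire subtree below a marker anyway. Names are hypothetical.
function exampleMarkRefetchBoundaries(node, isCached, insideRefetch = false) {
    const needsFetch = !isCached(node);
    const marker = needsFetch && !insideRefetch ? 'refetch' : null;
    const children = {};
    for (const key in node.children) {
        children[key] = exampleMarkRefetchBoundaries(node.children[key], isCached, insideRefetch || needsFetch);
    }
    return { segment: node.segment, children, marker };
}
// ---------------------------------------------------------------------------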
function pingStaticSegmentData(now, task, route, segment, routeKey, tree) {
    switch (segment.status) {
        case _cache.EntryStatus.Empty:
            // Upgrade to Pending so we know there's already a request in progress
            spawnPrefetchSubtask((0, _cache.fetchSegmentOnCacheMiss)(route, (0, _cache.upgradeToPendingSegment)(segment, _types.FetchStrategy.PPR), routeKey, tree));
            break;
        case _cache.EntryStatus.Pending: {
            // There's already a request in progress. Depending on what kind of
            // request it is, we may want to revalidate it.
            switch (segment.fetchStrategy) {
                case _types.FetchStrategy.PPR:
                case _types.FetchStrategy.PPRRuntime:
                case _types.FetchStrategy.Full:
                    break;
                case _types.FetchStrategy.LoadingBoundary:
                    // There's a pending request, but because it's using the old
                    // prefetching strategy, we can't be sure if it will be fulfilled by
                    // the response — it might be inside the loading boundary. Perform
                    // a revalidation, but because it's speculative, wait to do it at
                    // background priority.
                    if (background(task)) {
                        // TODO: Instead of speculatively revalidating, consider including
                        // `hasLoading` in the route tree prefetch response.
                        pingPPRSegmentRevalidation(now, route, routeKey, tree);
                    }
                    break;
                default:
                    segment.fetchStrategy;
            }
            break;
        }
        case _cache.EntryStatus.Rejected: {
            // The existing entry in the cache was rejected. Depending on how it
            // was originally fetched, we may or may not want to revalidate it.
            switch (segment.fetchStrategy) {
                case _types.FetchStrategy.PPR:
                case _types.FetchStrategy.PPRRuntime:
                case _types.FetchStrategy.Full:
                    break;
                case _types.FetchStrategy.LoadingBoundary:
                    // There's a rejected entry, but it was fetched using the loading
                    // boundary strategy. So the reason it wasn't returned by the server
                    // might just be because it was inside a loading boundary. Or because
                    // there was a dynamic rewrite. Revalidate it using the per-
                    // segment strategy.
                    //
                    // Because a rejected segment will definitely prevent the segment (and
                    // all of its children) from rendering, we perform this revalidation
                    // immediately instead of deferring it to a background task.
                    pingPPRSegmentRevalidation(now, route, routeKey, tree);
                    break;
                default:
                    segment.fetchStrategy;
            }
            break;
        }
        case _cache.EntryStatus.Fulfilled:
            break;
        default:
            segment;
    }
    // Segments do not have dependent tasks, so once the prefetch is initiated,
    // there's nothing else for us to do (except write the server data into the
    // entry, which is handled by `fetchSegmentOnCacheMiss`).
}
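// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this module): `upsertSegmentOnCompletion`,
// used below, is defined later in this file, beyond this excerpt. Judging by
// its call sites, it awaits a revalidation entry and, on success, writes it
// into the main cache at the given vary path — roughly the following, where
// `upsert` stands in for the real cache write in ./cache:
async function exampleUpsertOnCompletion(promiseForEntry, varyPath, upsert) {
    const entry = await promiseForEntry;
    if (entry !== null) {
        upsert(varyPath, entry);
    }
}
// ---------------------------------------------------------------------------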
function pingPPRSegmentRevalidation(now, route, routeKey, tree) {
    const revalidatingSegment = (0, _cache.readOrCreateRevalidatingSegmentEntry)(now, _types.FetchStrategy.PPR, route, tree);
    switch (revalidatingSegment.status) {
        case _cache.EntryStatus.Empty:
            // Spawn a prefetch request and upsert the segment into the cache
            // upon completion.
            upsertSegmentOnCompletion(spawnPrefetchSubtask((0, _cache.fetchSegmentOnCacheMiss)(route, (0, _cache.upgradeToPendingSegment)(revalidatingSegment, _types.FetchStrategy.PPR), routeKey, tree)), (0, _varypath.getSegmentVaryPathForRequest)(_types.FetchStrategy.PPR, tree));
            break;
        case _cache.EntryStatus.Pending:
            break;
        case _cache.EntryStatus.Fulfilled:
        case _cache.EntryStatus.Rejected:
            break;
        default:
            revalidatingSegment;
    }
}
function pingFullSegmentRevalidation(now, route, tree, fetchStrategy) {
    const revalidatingSegment = (0, _cache.readOrCreateRevalidatingSegmentEntry)(now, fetchStrategy, route, tree);
    if (revalidatingSegment.status === _cache.EntryStatus.Empty) {
        // During a Full/PPRRuntime prefetch, a single dynamic request is made for
        // all the segments that we need. So we don't initiate a request here
        // directly. By returning a pending entry from this function, it signals
        // to the caller that this segment should be included in the request
        // that's sent to the server.
        const pendingSegment = (0, _cache.upgradeToPendingSegment)(revalidatingSegment, fetchStrategy);
        upsertSegmentOnCompletion((0, _cache.waitForSegmentCacheEntry)(pendingSegment), (0, _varypath.getSegmentVaryPathForRequest)(fetchStrategy, tree));
        return pendingSegment;
    } else {
        // There's already a revalidation in progress.
        const nonEmptyRevalidatingSegment = revalidatingSegment;
        if ((0, _cache.canNewFetchStrategyProvideMoreContent)(nonEmptyRevalidatingSegment.fetchStrategy, fetchStrategy)) {
            // The existing revalidation was fetched using a less specific strategy.
            // Reset it and start a new revalidation.
            const emptySegment = (0, _cache.overwriteRevalidatingSegmentCacheEntry)(fetchStrategy, route, tree);
            const pendingSegment = (0, _cache.upgradeToPendingSegment)(emptySegment, fetchStrategy);
            upsertSegmentOnCompletion((0, _cache.waitForSegmentCacheEntry)(pendingSegment), (0, _varypath.getSegmentVa