import { HasLoadingBoundary } from '../../../shared/lib/app-router-types';
import { NEXT_DID_POSTPONE_HEADER, NEXT_ROUTER_PREFETCH_HEADER, NEXT_ROUTER_SEGMENT_PREFETCH_HEADER, NEXT_ROUTER_STALE_TIME_HEADER, NEXT_ROUTER_STATE_TREE_HEADER, NEXT_URL, RSC_CONTENT_TYPE_HEADER, RSC_HEADER } from '../app-router-headers';
import { createFetch, createFromNextReadableStream } from '../router-reducer/fetch-server-response';
import { pingPrefetchTask, isPrefetchTaskDirty, startRevalidationCooldown } from './scheduler';
import { getRouteVaryPath, getFulfilledRouteVaryPath, getSegmentVaryPathForRequest, appendLayoutVaryPath, finalizeLayoutVaryPath, finalizePageVaryPath, clonePageVaryPathWithNewSearchParams, finalizeMetadataVaryPath } from './vary-path';
import { getAppBuildId } from '../../app-build-id';
import { createHrefFromUrl } from '../router-reducer/create-href-from-url';
// TODO: Rename this module to avoid confusion with other types of cache keys
import { createCacheKey as createPrefetchRequestKey } from './cache-key';
import { doesStaticSegmentAppearInURL, getCacheKeyForDynamicParam, getRenderedPathname, getRenderedSearch, parseDynamicParamFromURLPart } from '../../route-params';
import { createCacheMap, getFromCacheMap, setInCacheMap, setSizeInCacheMap, deleteFromCacheMap, isValueExpired } from './cache-map';
import { appendSegmentRequestKeyPart, convertSegmentPathToStaticExportFilename, createSegmentRequestKeyPart, HEAD_REQUEST_KEY, ROOT_SEGMENT_REQUEST_KEY } from '../../../shared/lib/segment-cache/segment-value-encoding';
import { normalizeFlightData, prepareFlightRouterStateForRequest } from '../../flight-data-helpers';
import { STATIC_STALETIME_MS } from '../router-reducer/reducers/navigate-reducer';
import { pingVisibleLinks } from '../links';
import { PAGE_SEGMENT_KEY } from '../../../shared/lib/segment';
import { FetchStrategy } from './types';
import { createPromiseWithResolvers } from '../../../shared/lib/promise-with-resolvers';
/**
* Ensures a minimum stale time of 30s to avoid issues where the server sends a
* stale time that is too short, which would prevent anything from being prefetched.
*/ export function getStaleTimeMs(staleTimeSeconds) {
return Math.max(staleTimeSeconds, 30) * 1000;
}
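// For example:
//   getStaleTimeMs(5)   // => 30000  (clamped up to the 30-second minimum)
//   getStaleTimeMs(300) // => 300000 (5 minutes, returned unchanged)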
/**
* Tracks the status of a cache entry as it progresses from no data (Empty),
* to waiting for server data (Pending), to finished (either Fulfilled or
* Rejected, depending on the response from the server).
*/ export var EntryStatus = /*#__PURE__*/ function(EntryStatus) {
EntryStatus[EntryStatus["Empty"] = 0] = "Empty";
EntryStatus[EntryStatus["Pending"] = 1] = "Pending";
EntryStatus[EntryStatus["Fulfilled"] = 2] = "Fulfilled";
EntryStatus[EntryStatus["Rejected"] = 3] = "Rejected";
return EntryStatus;
}({});
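// Note that the compiled code below compares against these numeric values
// directly (Empty = 0, Pending = 1, Fulfilled = 2, Rejected = 3), e.g.
// `status: 0` means Empty and `entry.status = 3` marks an entry as Rejected.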
const isOutputExportMode = process.env.NODE_ENV === 'production' && process.env.__NEXT_CONFIG_OUTPUT === 'export';
const MetadataOnlyRequestTree = [
'',
{},
null,
'metadata-only'
];
let routeCacheMap = createCacheMap();
let segmentCacheMap = createCacheMap();
// All invalidation listeners for the whole cache are tracked in a single set.
// Since we don't yet support tag or path-based invalidation, there's no point
// tracking them any more granularly than this. Once we add granular
// invalidation, that may change, though generally the model is to just notify
// the listeners and allow the caller to poll the prefetch cache with a new
// prefetch task if desired.
let invalidationListeners = null;
// Incrementing counter used to track cache invalidations.
let currentCacheVersion = 0;
export function getCurrentCacheVersion() {
return currentCacheVersion;
}
/**
* Used to clear the client prefetch cache when a server action calls
* revalidatePath or revalidateTag. Eventually we will support only clearing the
* segments that were actually affected, but there's more work to be done on the
* server before the client is able to do this correctly.
*/ export function revalidateEntireCache(nextUrl, tree) {
// Increment the current cache version. This does not eagerly evict anything
// from the cache, but because all the entries are versioned, and we check
// the version when reading from the cache, this effectively causes all
// entries to be evicted lazily. We do it lazily because in the future,
// actions like revalidateTag or refresh will not evict the entire cache,
// but rather some subset of the entries.
currentCacheVersion++;
// Start a cooldown before re-prefetching to allow CDN cache propagation.
startRevalidationCooldown();
// Prefetch all the currently visible links again, to re-fill the cache.
pingVisibleLinks(nextUrl, tree);
// Similarly, notify all invalidation listeners (i.e. those passed to
// `router.prefetch(onInvalidate)`), so they can trigger a new prefetch
// if needed.
pingInvalidationListeners(nextUrl, tree);
}
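// A rough sketch of the lazy, version-based eviction described above. The
// actual check lives in isValueExpired (./cache-map), which isn't shown here,
// but conceptually it behaves something like this (hypothetical helper, for
// illustration only):
//
//   function isEntryStillValid(now, entry) {
//     return entry.version === getCurrentCacheVersion() && entry.staleAt > now;
//   }
//
// Incrementing currentCacheVersion therefore makes every existing entry read
// as expired without eagerly touching the cache maps.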
function attachInvalidationListener(task) {
// This function is called whenever a prefetch task reads a cache entry. If
// the task has an onInvalidate function associated with it — i.e. the one
// optionally passed to router.prefetch(onInvalidate) — then we attach that
// listener to every cache entry that the task reads. Then, if an entry
// is invalidated, we call the function.
if (task.onInvalidate !== null) {
if (invalidationListeners === null) {
invalidationListeners = new Set([
task
]);
} else {
invalidationListeners.add(task);
}
}
}
function notifyInvalidationListener(task) {
const onInvalidate = task.onInvalidate;
if (onInvalidate !== null) {
// Clear the callback from the task object to guarantee it's not called more
// than once.
task.onInvalidate = null;
// This is a user-space function, so we must wrap in try/catch.
try {
onInvalidate();
} catch (error) {
if (typeof reportError === 'function') {
reportError(error);
} else {
console.error(error);
}
}
}
}
export function pingInvalidationListeners(nextUrl, tree) {
// The rough equivalent of pingVisibleLinks, but for onInvalidate callbacks.
// This is called when the Next-Url or the base tree changes, since those
// may affect the result of a prefetch task. It's also called after a
// cache invalidation.
if (invalidationListeners !== null) {
const tasks = invalidationListeners;
invalidationListeners = null;
for (const task of tasks){
if (isPrefetchTaskDirty(task, nextUrl, tree)) {
notifyInvalidationListener(task);
}
}
}
}
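// Putting the pieces above together, the onInvalidate flow is roughly:
//
//   1. A prefetch task with an onInvalidate callback reads a cache entry;
//      attachInvalidationListener registers the task in invalidationListeners.
//   2. revalidateEntireCache, or a change to Next-Url / the base tree, calls
//      pingInvalidationListeners.
//   3. Each registered task is checked with isPrefetchTaskDirty; dirty tasks
//      have their onInvalidate callback fired at most once via
//      notifyInvalidationListener, after which the caller can schedule a new
//      prefetch if it wants the cache refilled.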
export function readRouteCacheEntry(now, key) {
const varyPath = getRouteVaryPath(key.pathname, key.search, key.nextUrl);
const isRevalidation = false;
return getFromCacheMap(now, getCurrentCacheVersion(), routeCacheMap, varyPath, isRevalidation);
}
export function readSegmentCacheEntry(now, varyPath) {
const isRevalidation = false;
return getFromCacheMap(now, getCurrentCacheVersion(), segmentCacheMap, varyPath, isRevalidation);
}
function readRevalidatingSegmentCacheEntry(now, varyPath) {
const isRevalidation = true;
return getFromCacheMap(now, getCurrentCacheVersion(), segmentCacheMap, varyPath, isRevalidation);
}
export function waitForSegmentCacheEntry(pendingEntry) {
// Because the entry is pending, there's already an in-progress request.
// Attach a promise to the entry that will resolve when the server responds.
let promiseWithResolvers = pendingEntry.promise;
if (promiseWithResolvers === null) {
promiseWithResolvers = pendingEntry.promise = createPromiseWithResolvers();
} else {
// There's already a promise we can use
}
return promiseWithResolvers.promise;
}
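// Example usage (a sketch): a caller that finds a Pending entry can await the
// eventual result. The promise resolves with the fulfilled entry, or with
// null if the request is rejected (see fulfillSegmentCacheEntry and
// rejectSegmentCacheEntry below).
//
//   const result = await waitForSegmentCacheEntry(pendingEntry);
//   if (result !== null) {
//     // result.rsc, result.loading, etc. are now populated
//   }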
/**
* Checks if an entry for a route exists in the cache. If so, it returns the
* entry. If not, it adds an empty entry to the cache and returns it.
*/ export function readOrCreateRouteCacheEntry(now, task, key) {
attachInvalidationListener(task);
const existingEntry = readRouteCacheEntry(now, key);
if (existingEntry !== null) {
return existingEntry;
}
// Create a pending entry and add it to the cache.
const pendingEntry = {
canonicalUrl: null,
status: 0,
blockedTasks: null,
tree: null,
metadata: null,
// This is initialized to true because we don't know yet whether the route
// could be intercepted. It's only set to false once we receive a response
// from the server.
couldBeIntercepted: true,
// Similarly, we don't yet know if the route supports PPR.
isPPREnabled: false,
renderedSearch: null,
// Map-related fields
ref: null,
size: 0,
// Since this is an empty entry, there's no reason to ever evict it. It will
// be updated when the data is populated.
staleAt: Infinity,
version: getCurrentCacheVersion()
};
const varyPath = getRouteVaryPath(key.pathname, key.search, key.nextUrl);
const isRevalidation = false;
setInCacheMap(routeCacheMap, varyPath, pendingEntry, isRevalidation);
return pendingEntry;
}
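// Illustrative caller pattern (a sketch; in practice the caller is presumably
// the prefetch scheduler in ./scheduler):
//
//   const entry = readOrCreateRouteCacheEntry(Date.now(), task, key);
//   if (entry.status === EntryStatus.Empty) {
//     // No request has been issued for this route yet; the caller is
//     // expected to start one (see fetchRouteOnCacheMiss below).
//   }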
export function requestOptimisticRouteCacheEntry(now, requestedUrl, nextUrl) {
// This function is called during a navigation when there was no matching
// route tree in the prefetch cache. Before de-opting to a blocking,
// unprefetched navigation, we will first attempt to construct an "optimistic"
// route tree by checking the cache for similar routes.
//
// Check if there's a route with the same pathname, but with different
// search params. We can then base our optimistic route tree on this entry.
//
// Conceptually, we are simulating what would happen if we did perform a
// prefetch of the requested URL, under the assumption that the server will
// not redirect or rewrite the request in a different manner than the
// base route tree. This assumption might not hold, in which case we'll have
// to recover when we perform the dynamic navigation request. However, this
// is what would happen if a route were dynamically rewritten/redirected
// in between the prefetch and the navigation. So the logic needs to exist
// to handle this case regardless.
// Look for a route with the same pathname, but with an empty search string.
// TODO: There's nothing inherently special about the empty search string;
// it's chosen somewhat arbitrarily, with the rationale that it's the most
// likely one to exist. But we should update this to match _any_ search
// string. The plan is to generalize this logic alongside other improvements
// related to "fallback" cache entries.
const requestedSearch = requestedUrl.search;
if (requestedSearch === '') {
// The caller would have already checked if a route with an empty search
// string is in the cache. So we can bail out here.
return null;
}
const urlWithoutSearchParams = new URL(requestedUrl);
urlWithoutSearchParams.search = '';
const routeWithNoSearchParams = readRouteCacheEntry(now, createPrefetchRequestKey(urlWithoutSearchParams.href, nextUrl));
if (routeWithNoSearchParams === null || routeWithNoSearchParams.status !== 2) {
// Bail out of constructing an optimistic route tree. This will result in
// a blocking, unprefetched navigation.
return null;
}
// Now we have a base route tree we can "patch" with our optimistic values.
// Optimistically assume that redirects for the requested pathname do
// not vary on the search string. Therefore, if the base route was
// redirected to a different search string, then the optimistic route
// should be redirected to the same search string. Otherwise, we use
// the requested search string.
const canonicalUrlForRouteWithNoSearchParams = new URL(routeWithNoSearchParams.canonicalUrl, requestedUrl.origin);
const optimisticCanonicalSearch = canonicalUrlForRouteWithNoSearchParams.search !== '' ? canonicalUrlForRouteWithNoSearchParams.search : requestedSearch;
// Similarly, optimistically assume that rewrites for the requested
// pathname do not vary on the search string. Therefore, if the base
// route was rewritten to a different search string, then the optimistic
// route should be rewritten to the same search string. Otherwise, we use
// the requested search string.
const optimisticRenderedSearch = routeWithNoSearchParams.renderedSearch !== '' ? routeWithNoSearchParams.renderedSearch : requestedSearch;
const optimisticUrl = new URL(routeWithNoSearchParams.canonicalUrl, location.origin);
optimisticUrl.search = optimisticCanonicalSearch;
const optimisticCanonicalUrl = createHrefFromUrl(optimisticUrl);
const optimisticRouteTree = createOptimisticRouteTree(routeWithNoSearchParams.tree, optimisticRenderedSearch);
const optimisticMetadataTree = createOptimisticRouteTree(routeWithNoSearchParams.metadata, optimisticRenderedSearch);
// Clone the base route tree, and override the relevant fields with our
// optimistic values.
const optimisticEntry = {
canonicalUrl: optimisticCanonicalUrl,
status: 2,
// This isn't cloned because it's instance-specific
blockedTasks: null,
tree: optimisticRouteTree,
metadata: optimisticMetadataTree,
couldBeIntercepted: routeWithNoSearchParams.couldBeIntercepted,
isPPREnabled: routeWithNoSearchParams.isPPREnabled,
// Override the rendered search with the optimistic value.
renderedSearch: optimisticRenderedSearch,
// Map-related fields
ref: null,
size: 0,
staleAt: routeWithNoSearchParams.staleAt,
version: routeWithNoSearchParams.version
};
// Do not insert this entry into the cache. It only exists so we can
// perform the current navigation. Just return it to the caller.
return optimisticEntry;
}
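// An illustrative example of the optimistic patching above (hypothetical
// URLs): navigating to /products?page=2 with no cached route for that exact
// URL, but with a fulfilled entry for /products (empty search), yields an
// optimistic entry whose canonical URL and rendered search are patched to
// "?page=2". If the cached entry was itself redirected or rewritten to a
// non-empty search string, that search string is used instead.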
function createOptimisticRouteTree(tree, newRenderedSearch) {
// Create a new route tree that is identical to the original one except for
// the rendered search string, which is contained in the vary path.
let clonedSlots = null;
const originalSlots = tree.slots;
if (originalSlots !== null) {
clonedSlots = {};
for(const parallelRouteKey in originalSlots){
const childTree = originalSlots[parallelRouteKey];
clonedSlots[parallelRouteKey] = createOptimisticRouteTree(childTree, newRenderedSearch);
}
}
// We only need to clone the vary path if the route is a page.
if (tree.isPage) {
return {
requestKey: tree.requestKey,
segment: tree.segment,
varyPath: clonePageVaryPathWithNewSearchParams(tree.varyPath, newRenderedSearch),
isPage: true,
slots: clonedSlots,
isRootLayout: tree.isRootLayout,
hasLoadingBoundary: tree.hasLoadingBoundary,
hasRuntimePrefetch: tree.hasRuntimePrefetch
};
}
return {
requestKey: tree.requestKey,
segment: tree.segment,
varyPath: tree.varyPath,
isPage: false,
slots: clonedSlots,
isRootLayout: tree.isRootLayout,
hasLoadingBoundary: tree.hasLoadingBoundary,
hasRuntimePrefetch: tree.hasRuntimePrefetch
};
}
/**
* Checks if an entry for a segment exists in the cache. If so, it returns the
* entry. If not, it adds an empty entry to the cache and returns it.
*/ export function readOrCreateSegmentCacheEntry(now, fetchStrategy, route, tree) {
const existingEntry = readSegmentCacheEntry(now, tree.varyPath);
if (existingEntry !== null) {
return existingEntry;
}
// Create a pending entry and add it to the cache.
const varyPathForRequest = getSegmentVaryPathForRequest(fetchStrategy, tree);
const pendingEntry = createDetachedSegmentCacheEntry(route.staleAt);
const isRevalidation = false;
setInCacheMap(segmentCacheMap, varyPathForRequest, pendingEntry, isRevalidation);
return pendingEntry;
}
export function readOrCreateRevalidatingSegmentEntry(now, fetchStrategy, route, tree) {
// This function is called when we've already confirmed that a particular
// segment is cached, but we want to perform another request anyway in case it
// returns more complete and/or fresher data than we already have. The logic
// for deciding whether to replace the existing entry is handled elsewhere;
// this function just handles retrieving a cache entry that we can use to
// track the revalidation.
//
// The reason revalidations are stored in the cache is because we need to be
// able to dedupe multiple revalidation requests. The reason they have to be
// handled specially is because we shouldn't overwrite a "normal" entry if
// one exists at the same keypath. So, for each internal cache location, there
// is a special "revalidation" slot that is used solely for this purpose.
//
// You can think of it as if all the revalidation entries were stored in a
// separate cache map from the canonical entries, and then transferred to the
// canonical cache map once the request is complete — this isn't how it's
// actually implemented, since it's more efficient to store them in the same
// data structure as the normal entries, but that's how it's modeled
// conceptually.
// TODO: Once we implement Fallback behavior for params, where an entry is
// re-keyed based on response information, we'll need to account for the
// possibility that the keypath of the previous entry is more generic than
// the keypath of the revalidating entry. In other words, the server could
// return a less generic entry upon revalidation. For now, though, this isn't
// a concern because the keypath is based solely on the prefetch strategy,
// not on data contained in the response.
const existingEntry = readRevalidatingSegmentCacheEntry(now, tree.varyPath);
if (existingEntry !== null) {
return existingEntry;
}
// Create a pending entry and add it to the cache.
const varyPathForRequest = getSegmentVaryPathForRequest(fetchStrategy, tree);
const pendingEntry = createDetachedSegmentCacheEntry(route.staleAt);
const isRevalidation = true;
setInCacheMap(segmentCacheMap, varyPathForRequest, pendingEntry, isRevalidation);
return pendingEntry;
}
export function overwriteRevalidatingSegmentCacheEntry(fetchStrategy, route, tree) {
// This function is called when we've already decided to replace an existing
// revalidation entry. Create a new entry and write it into the cache,
// overwriting the previous value.
const varyPathForRequest = getSegmentVaryPathForRequest(fetchStrategy, tree);
const pendingEntry = createDetachedSegmentCacheEntry(route.staleAt);
const isRevalidation = true;
setInCacheMap(segmentCacheMap, varyPathForRequest, pendingEntry, isRevalidation);
return pendingEntry;
}
export function upsertSegmentEntry(now, varyPath, candidateEntry) {
// We have a new entry that has not yet been inserted into the cache. Before
// we do so, we need to confirm whether it takes precedence over the existing
// entry (if one exists).
// TODO: We should not upsert an entry if its key was invalidated in the time
// since the request was made. We can do that by passing the "owner" entry to
// this function and confirming it's the same as `existingEntry`.
if (isValueExpired(now, getCurrentCacheVersion(), candidateEntry)) {
// The entry is expired. We cannot upsert it.
return null;
}
const existingEntry = readSegmentCacheEntry(now, varyPath);
if (existingEntry !== null) {
// Don't replace a more specific segment with a less-specific one. A case where this
// might happen is if the existing segment was fetched via
// `<Link prefetch={true}>`.
if (// We fetched the new segment using a different, less specific fetch strategy
// than the segment we already have in the cache, so it can't have more content.
candidateEntry.fetchStrategy !== existingEntry.fetchStrategy && !canNewFetchStrategyProvideMoreContent(existingEntry.fetchStrategy, candidateEntry.fetchStrategy) || // The existing entry isn't partial, but the new one is.
// (TODO: can this be true if `candidateEntry.fetchStrategy >= existingEntry.fetchStrategy`?)
!existingEntry.isPartial && candidateEntry.isPartial) {
// We're going to leave the revalidating entry in the cache so that it doesn't
// get revalidated again unnecessarily. Downgrade the Fulfilled entry to
// Rejected and null out the data so it can be garbage collected. We leave
// `staleAt` intact to prevent subsequent revalidation attempts only until
// the entry expires.
const rejectedEntry = candidateEntry;
rejectedEntry.status = 3;
rejectedEntry.loading = null;
rejectedEntry.rsc = null;
return null;
}
// Evict the existing entry from the cache.
deleteFromCacheMap(existingEntry);
}
const isRevalidation = false;
setInCacheMap(segmentCacheMap, varyPath, candidateEntry, isRevalidation);
return candidateEntry;
}
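// Concretely (an illustrative case of the checks above): a segment already
// fetched with FetchStrategy.Full (e.g. via <Link prefetch={true}>) will not
// be overwritten by a candidate fetched with a less specific strategy, since
// the new response cannot contain more content than what's already cached;
// the candidate entry is downgraded to Rejected instead of being inserted.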
export function createDetachedSegmentCacheEntry(staleAt) {
const emptyEntry = {
status: 0,
// Default to assuming the fetch strategy will be PPR. This will be updated
// when a fetch is actually initiated.
fetchStrategy: FetchStrategy.PPR,
rsc: null,
loading: null,
isPartial: true,
promise: null,
// Map-related fields
ref: null,
size: 0,
staleAt,
version: 0
};
return emptyEntry;
}
export function upgradeToPendingSegment(emptyEntry, fetchStrategy) {
const pendingEntry = emptyEntry;
pendingEntry.status = 1;
pendingEntry.fetchStrategy = fetchStrategy;
if (fetchStrategy === FetchStrategy.Full) {
// We can assume the response will contain the full segment data. Set this
// to false so we know it's OK to omit this segment from any navigation
// requests that may happen while the data is still pending.
pendingEntry.isPartial = false;
}
// Set the version here, since this is right before the request is initiated.
// The next time the global cache version is incremented, the entry will
// effectively be evicted. This happens before initiating the request, rather
// than when receiving the response, because it's guaranteed to happen
// before the data is read on the server.
pendingEntry.version = getCurrentCacheVersion();
return pendingEntry;
}
function pingBlockedTasks(entry) {
const blockedTasks = entry.blockedTasks;
if (blockedTasks !== null) {
for (const task of blockedTasks){
pingPrefetchTask(task);
}
entry.blockedTasks = null;
}
}
function fulfillRouteCacheEntry(entry, tree, metadataVaryPath, staleAt, couldBeIntercepted, canonicalUrl, renderedSearch, isPPREnabled) {
// The Head is not actually part of the route tree, but other than that, it's
// fetched and cached like a segment. Some functions expect a RouteTree
// object, so rather than fork the logic in all those places, we use this
// "fake" one.
const metadata = {
requestKey: HEAD_REQUEST_KEY,
segment: HEAD_REQUEST_KEY,
varyPath: metadataVaryPath,
// The metadata isn't really a "page" (though it isn't really a "segment"
// either) but for the purposes of how this field is used, it behaves like
// one. If this logic ever gets more complex we can change this to an enum.
isPage: true,
slots: null,
isRootLayout: false,
hasLoadingBoundary: HasLoadingBoundary.SubtreeHasNoLoadingBoundary,
hasRuntimePrefetch: false
};
const fulfilledEntry = entry;
fulfilledEntry.status = 2;
fulfilledEntry.tree = tree;
fulfilledEntry.metadata = metadata;
fulfilledEntry.staleAt = staleAt;
fulfilledEntry.couldBeIntercepted = couldBeIntercepted;
fulfilledEntry.canonicalUrl = canonicalUrl;
fulfilledEntry.renderedSearch = renderedSearch;
fulfilledEntry.isPPREnabled = isPPREnabled;
pingBlockedTasks(entry);
return fulfilledEntry;
}
function fulfillSegmentCacheEntry(segmentCacheEntry, rsc, loading, staleAt, isPartial) {
const fulfilledEntry = segmentCacheEntry;
fulfilledEntry.status = 2;
fulfilledEntry.rsc = rsc;
fulfilledEntry.loading = loading;
fulfilledEntry.staleAt = staleAt;
fulfilledEntry.isPartial = isPartial;
// Resolve any listeners that were waiting for this data.
if (segmentCacheEntry.promise !== null) {
segmentCacheEntry.promise.resolve(fulfilledEntry);
// Free the promise for garbage collection.
fulfilledEntry.promise = null;
}
return fulfilledEntry;
}
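// Typical lifecycle of a segment entry, as a sketch based on the functions in
// this module (not a literal call sequence from any single call site):
//
//   const entry = createDetachedSegmentCacheEntry(route.staleAt); // Empty
//   upgradeToPendingSegment(entry, FetchStrategy.PPR);            // Pending
//   // ...a fetch is issued; once the response arrives...
//   fulfillSegmentCacheEntry(entry, rsc, loading, staleAt, isPartial); // Fulfilled
//   // ...or, on failure:
//   rejectSegmentCacheEntry(entry, Date.now() + 10 * 1000);       // Rejected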
function rejectRouteCacheEntry(entry, staleAt) {
const rejectedEntry = entry;
rejectedEntry.status = 3;
rejectedEntry.staleAt = staleAt;
pingBlockedTasks(entry);
}
function rejectSegmentCacheEntry(entry, staleAt) {
const rejectedEntry = entry;
rejectedEntry.status = 3;
rejectedEntry.staleAt = staleAt;
if (entry.promise !== null) {
// NOTE: We don't currently propagate the reason the prefetch was canceled
// but we could by accepting a `reason` argument.
entry.promise.resolve(null);
entry.promise = null;
}
}
function convertRootTreePrefetchToRouteTree(rootTree, renderedPathname, renderedSearch, acc) {
// Remove trailing and leading slashes
const pathnameParts = renderedPathname.split('/').filter((p)=>p !== '');
const index = 0;
const rootSegment = ROOT_SEGMENT_REQUEST_KEY;
return convertTreePrefetchToRouteTree(rootTree.tree, rootSegment, null, ROOT_SEGMENT_REQUEST_KEY, pathnameParts, index, renderedSearch, acc);
}
function convertTreePrefetchToRouteTree(prefetch, segment, partialVaryPath, requestKey, pathnameParts, pathnamePartsIndex, renderedSearch, acc) {
// Converts the route tree sent by the server into the format used by the
// cache. The cached version of the tree includes additional fields, such as a
// cache key for each segment. Since this is frequently accessed, we compute
// it once instead of on every access. This same cache key is also used to
// request the segment from the server.
let slots = null;
let isPage;
let varyPath;
const prefetchSlots = prefetch.slots;
if (prefetchSlots !== null) {
isPage = false;
varyPath = finalizeLayoutVaryPath(requestKey, partialVaryPath);
slots = {};
for(let parallelRouteKey in prefetchSlots){
const childPrefetch = prefetchSlots[parallelRouteKey];
const childParamName = childPrefetch.name;
const childParamType = childPrefetch.paramType;
const childServerSentParamKey = childPrefetch.paramKey;
let childDoesAppearInURL;
let childSegment;
let childPartialVaryPath;
if (childParamType !== null) {
// This segment is parameterized. Get the param from the pathname.
const childParamValue = parseDynamicParamFromURLPart(childParamType, pathnameParts, pathnamePartsIndex);
// Assign a cache key to the segment, based on the param value. In the
// pre-Segment Cache implementation, the server computes this and sends
// it in the body of the response. In the Segment Cache implementation,
// the server sends an empty string and we fill it in here.
// TODO: We're intentionally not adding the search param to page
// segments here; it's tracked separately and added back during a read.
// This would be clearer if we waited to construct the segment until it's
// read from the cache, since that's effectively what we're
// doing anyway.
const childParamKey = // The server omits this field from the prefetch response when
// cacheComponents is enabled.
childServerSentParamKey !== null ? childServerSentParamKey : getCacheKeyForDynamicParam(childParamValue, '');
childPartialVaryPath = appendLayoutVaryPath(partialVaryPath, childParamKey);
childSegment = [
childParamName,
childParamKey,
childParamType
];
childDoesAppearInURL = true;
} else {
// This segment does not have a param. Inherit the partial vary path of
// the parent.
childPartialVaryPath = partialVaryPath;
childSegment = childParamName;
childDoesAppearInURL = doesStaticSegmentAppearInURL(childParamName);
}
// Only increment the index if the segment appears in the URL. If it's a
// "virtual" segment, like a route group, it remains the same.
const childPathnamePartsIndex = childDoesAppearInURL ? pathnamePartsIndex + 1 : pathnamePartsIndex;
const childRequestKeyPart = createSegmentRequestKeyPart(childSegment);
const childRequestKey = appendSegmentRequestKeyPart(requestKey, parallelRouteKey, childRequestKeyPart);
slots[parallelRouteKey] = convertTreePrefetchToRouteTree(childPrefetch, childSegment, childPartialVaryPath, childRequestKey, pathnameParts, childPathnamePartsIndex, renderedSearch, acc);
}
} else {
if (requestKey.endsWith(PAGE_SEGMENT_KEY)) {
// This is a page segment.
isPage = true;
varyPath = finalizePageVaryPath(requestKey, renderedSearch, partialVaryPath);
// The metadata "segment" is not part of the route tree, but it has the same
// conceptual params as a page segment. Write the vary path into the
// accumulator object. If there are multiple parallel pages, we use the
// first one. Which page we choose is arbitrary as long as it's
// consistently the same one every time. See
// finalizeMetadataVaryPath for more details.
if (acc.metadataVaryPath === null) {
acc.metadataVaryPath = finalizeMetadataVaryPath(requestKey, renderedSearch, partialVaryPath);
}
} else {
// This is a layout segment.
isPage = false;
varyPath = finalizeLayoutVaryPath(requestKey, partialVaryPath);
}
}
return {
requestKey,
segment,
varyPath,
// TODO: Cheating the type system here a bit because TypeScript can't tell
// that the types of isPage and varyPath are consistent. The fix would be to
// create separate constructors and call the appropriate one from each of
// the branches above. Just seems a bit overkill only for one field so I'll
// leave it as-is for now. If isPage were wrong it would break the behavior
// and we'd catch it quickly, anyway.
isPage: isPage,
slots,
isRootLayout: prefetch.isRootLayout,
// This field is only relevant to dynamic routes. For a PPR/static route,
// there's always some partial loading state we can fetch.
hasLoadingBoundary: HasLoadingBoundary.SegmentHasLoadingBoundary,
hasRuntimePrefetch: prefetch.hasRuntimePrefetch
};
}
function convertRootFlightRouterStateToRouteTree(flightRouterState, renderedSearch, acc) {
return convertFlightRouterStateToRouteTree(flightRouterState, ROOT_SEGMENT_REQUEST_KEY, null, renderedSearch, acc);
}
function convertFlightRouterStateToRouteTree(flightRouterState, requestKey, parentPartialVaryPath, renderedSearch, acc) {
const originalSegment = flightRouterState[0];
let segment;
let partialVaryPath;
let isPage;
let varyPath;
if (Array.isArray(originalSegment)) {
isPage = false;
const paramCacheKey = originalSegment[1];
partialVaryPath = appendLayoutVaryPath(parentPartialVaryPath, paramCacheKey);
varyPath = finalizeLayoutVaryPath(requestKey, partialVaryPath);
segment = originalSegment;
} else {
// This segment does not have a param. Inherit the partial vary path of
// the parent.
partialVaryPath = parentPartialVaryPath;
if (requestKey.endsWith(PAGE_SEGMENT_KEY)) {
// This is a page segment.
isPage = true;
// The navigation implementation expects the search params to be included
// in the segment. However, in the case of a static response, the search
// params are omitted. So the client needs to add them back in when reading
// from the Segment Cache.
//
// For consistency, we'll do this for dynamic responses, too.
//
// TODO: We should move search params out of FlightRouterState and handle
// them entirely on the client, similar to our plan for dynamic params.
segment = PAGE_SEGMENT_KEY;
varyPath = finalizePageVaryPath(requestKey, renderedSearch, partialVaryPath);
// The metadata "segment" is not part of the route tree, but it has the same
// conceptual params as a page segment. Write the vary path into the
// accumulator object. If there are multiple parallel pages, we use the
// first one. Which page we choose is arbitrary as long as it's
// consistently the same one every time. See
// finalizeMetadataVaryPath for more details.
if (acc.metadataVaryPath === null) {
acc.metadataVaryPath = finalizeMetadataVaryPath(requestKey, renderedSearch, partialVaryPath);
}
} else {
// This is a layout segment.
isPage = false;
segment = originalSegment;
varyPath = finalizeLayoutVaryPath(requestKey, partialVaryPath);
}
}
let slots = null;
const parallelRoutes = flightRouterState[1];
for(let parallelRouteKey in parallelRoutes){
const childRouterState = parallelRoutes[parallelRouteKey];
const childSegment = childRouterState[0];
// TODO: Eventually, the param values will not be included in the response
// from the server. We'll instead fill them in on the client by parsing
// the URL. This is where we'll do that.
const childRequestKeyPart = createSegmentRequestKeyPart(childSegment);
const childRequestKey = appendSegmentRequestKeyPart(requestKey, parallelRouteKey, childRequestKeyPart);
const childTree = convertFlightRouterStateToRouteTree(childRouterState, childRequestKey, partialVaryPath, renderedSearch, acc);
if (slots === null) {
slots = {
[parallelRouteKey]: childTree
};
} else {
slots[parallelRouteKey] = childTree;
}
}
return {
requestKey,
segment,
varyPath,
// TODO: Cheating the type system here a bit because TypeScript can't tell
// that the types of isPage and varyPath are consistent. The fix would be to
// create separate constructors and call the appropriate one from each of
// the branches above. Just seems a bit overkill only for one field so I'll
// leave it as-is for now. If isPage were wrong it would break the behavior
// and we'd catch it quickly, anyway.
isPage: isPage,
slots,
isRootLayout: flightRouterState[4] === true,
hasLoadingBoundary: flightRouterState[5] !== undefined ? flightRouterState[5] : HasLoadingBoundary.SubtreeHasNoLoadingBoundary,
// Non-static tree responses are only used by apps that haven't adopted
// Cache Components. So this is always false.
hasRuntimePrefetch: false
};
}
export function convertRouteTreeToFlightRouterState(routeTree) {
const parallelRoutes = {};
if (routeTree.slots !== null) {
for(const parallelRouteKey in routeTree.slots){
parallelRoutes[parallelRouteKey] = convertRouteTreeToFlightRouterState(routeTree.slots[parallelRouteKey]);
}
}
const flightRouterState = [
routeTree.segment,
parallelRoutes,
null,
null,
routeTree.isRootLayout
];
return flightRouterState;
}
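// For reference, the FlightRouterState tuple built above has this shape (an
// illustrative sketch; the two null slots are fields this conversion leaves
// unset):
//
//   [
//     routeTree.segment,  // segment
//     parallelRoutes,     // { [parallelRouteKey]: child FlightRouterState }
//     null,
//     null,
//     routeTree.isRootLayout
//   ]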
export async function fetchRouteOnCacheMiss(entry, task, key) {
// This function is allowed to use async/await because it contains the actual
// fetch that gets issued on a cache miss. Notice it writes the result to the
// cache entry directly, rather than return data that is then written by
// the caller.
const pathname = key.pathname;
const search = key.search;
const nextUrl = key.nextUrl;
const segmentPath = '/_tree';
const headers = {
[RSC_HEADER]: '1',
[NEXT_ROUTER_PREFETCH_HEADER]: '1',
[NEXT_ROUTER_SEGMENT_PREFETCH_HEADER]: segmentPath
};
if (nextUrl !== null) {
headers[NEXT_URL] = nextUrl;
}
try {
const url = new URL(pathname + search, location.origin);
let response;
let urlAfterRedirects;
if (isOutputExportMode) {
// In output: "export" mode, we can't use headers to request a particular
// segment. Instead, we encode the extra request information into the URL.
// This is not part of the "public" interface of the app; it's an internal
// Next.js implementation detail that the app developer should not need to
// concern themselves with.
//
// For example, to request a segment:
//
// Path passed to <Link>: /path/to/page
// Path passed to fetch: /path/to/page/__next-segments/_tree
//
// (This is not the exact protocol, just an illustration.)
//
// Before we do that, though, we need to account for redirects. Even in
// output: "export" mode, a proxy might redirect the page to a different
// location, but we shouldn't assume that it also redirects all of the
// segment files.
//
// To check whether the page is redirected, we previously performed a range
// request for the first 64 bytes of the HTML document to check whether the
// target page is part of this app (by checking if the build id matches).
// Only if the target page was part of this app did we determine the final
// canonical URL.
//
// However, as mentioned in https://github.com/vercel/next.js/pull/85903,
// some popular static hosting providers (like Cloudflare Pages or Render.com)
// do not support range requests; in the worst case, the entire HTML document
// could be returned instead of just 64 bytes, which is wasteful.
//
// So instead, we drop the build id check here and simply perform a HEAD
// request, rejecting 1xx/4xx/5xx responses, and then determine the final
// URL after redirects.
//
// NOTE: We could embed the route tree into the HTML document, to avoid
// a second request. We're not doing that currently because it would make
// the HTML document larger and affect normal page loads.
const headResponse = await fetch(url, {
method: 'HEAD'
});
if (headResponse.status < 200 || headResponse.status >= 400) {
// The target page responded w/o a successful status code
// Could be a WAF serving a 403, or a 5xx from a backend
//
// Note that we can't use headResponse.ok here, because
// Response#ok returns `false` with 3xx responses.
rejectRouteCacheEntry(entry, Date.now() + 10 * 1000);
return null;
}
urlAfterRedirects = headResponse.redirected ? new URL(headResponse.url) : url;
response = await fetchPrefetchResponse(addSegmentPathToUrlInOutputExportMode(urlAfterRedirects, segmentPath), headers);
} else {
// "Server" mode. We can use request headers instead of the pathname.
// TODO: The eventual plan is to get rid of our custom request headers and
// encode everything into the URL, using a similar strategy to the
// "output: export" block above.
response = await fetchPrefetchResponse(url, headers);
urlAfterRedirects = response !== null && response.redirected ? new URL(response.url) : url;
}
if (!response || !response.ok || // 204 is a Cache miss. Though theoretically this shouldn't happen when
// PPR is enabled, because we always respond to route tree requests, even
// if it needs to be blockingly generated on demand.
response.status === 204 || !response.body) {
// Server responded with an error, or with a miss. We should still cache
// the response, but we can try again after 10 seconds.
rejectRouteCacheEntry(entry, Date.now() + 10 * 1000);
return null;
}
// TODO: The canonical URL is the href without the origin. I think
// historically the reason for this is because the initial canonical URL
// gets passed as a prop to the top-level React component, which means it
// needs to be computed during SSR. If it were to include the origin, it
// would need to always be the same as location.origin on the client, to prevent
// a hydration mismatch. To sidestep this complexity, we omit the origin.
//
// However, since this is neither a native URL object nor a fully qualified
// URL string, we need to be careful about how we use it. To prevent subtle
// mistakes, we should create a special type for it, instead of just string.
// Or, we should just use a (readonly) URL object instead. The type of the
// prop that we pass to seed the initial state does not need to be the same
// type as the state itself.
const canonicalUrl = createHrefFromUrl(urlAfterRedirects);
// Check whether the response varies based on the Next-Url header.
const varyHeader = response.headers.get('vary');
const couldBeIntercepted = varyHeader !== null && varyHeader.includes(NEXT_URL);
// Track when the network connection closes.
const closed = createPromiseWithResolvers();
// This checks whether the response was served from the per-segment cache,
// rather than the old prefetching flow. If it fails, it implies that PPR
// is disabled on this route.
const routeIsPPREnabled = response.headers.get(NEXT_DID_POSTPONE_HEADER) === '2' || // In output: "export" mode, we can't rely on response headers. But if we
// receive a well-formed response, we can assume it's a static response,
// because all data is static in this mode.
isOutputExportMode;
if (routeIsPPREnabled) {
const prefetchStream = createPrefetchResponseStream(response.body, closed.resolve, function onResponseSizeUpdate(size) {
setSizeInCacheMap(entry, size);
});
const serverData = await createFromNextReadableStream(prefetchStream, headers);
if (serverData.buildId !== getAppBuildId()) {
// The server build does not match the client. Treat as a 404. During
// an actual navigation, the router will trigger an MPA navigation.
// TODO: Consider moving the build ID to a response header so we can check
// it before decoding the response, and so there's one way of checking
// across all response types.
// TODO: We should cache the fact that this is an MPA navigation.
rejectRouteCacheEntry(entry, Date.now() + 10 * 1000);
return null;
}
// Get the params that were used to render the target page. These may
// be different from the params in the request URL, if the page
// was rewritten.
const renderedPathname = getRenderedPathname(response);
const renderedSearch = getRenderedSearch(response);
// Convert the server-sent data into the RouteTree format used by the
// client cache.
//
// During this traversal, we accumulate additional data into this
// "accumulator" object.
const acc = {
metadataVaryPath: null
};
const routeTree = convertRootTreePrefetchToRouteTree(serverData, renderedPathname, renderedSearch, acc);
const metadataVaryPath = acc.metadataVaryPath;
if (metadataVaryPath === null) {
rejectRouteCacheEntry(entry, Date.now() + 10 * 1000);
return null;
}
const staleTimeMs = getStaleTimeMs(serverData.staleTime);
fulfillRouteCacheEntry(entry, routeTree, metadataVaryPath, Date.now() + staleTimeMs, couldBeIntercepted, canonicalUrl, renderedSearch, routeIsPPREnabled);
} else {
// PPR is not enabled for this route. The server responds with a
// different format (FlightRouterState) that we need to convert.
// TODO: We will unify the responses eventually. I'm keeping the types
// separate for now because FlightRouterState has so many
// overloaded concerns.
const prefetchStream = createPrefetchResponseStream(response.body, closed.resolve, function onResponseSizeUpdate(size) {
setSizeInCacheMap(entry, size);
});
const serverData = await createFromNextReadableStream(prefetchStream, headers);
if (serverData.b !== getAppBuildId()) {
// The server build does not match the client. Treat as a 404. During
// an actual navigation, the router will trigger an MPA navigation.
// TODO: Consider moving the build ID to a response header so we can check
// it before decoding the response, and so there's one way of checking
// across all response types.
// TODO: We should cache the fact that this is an MPA navigation.
rejectRouteCacheEntry(entry, Date.now() + 10 * 1000);
return null;
}
writeDynamicTreeResponseIntoCache(Date.now(), task, // The non-PPR response format is what we'd get if we prefetched these segments
// using the LoadingBoundary fetch strategy, so mark their cache entries accordingly.
FetchStrategy.LoadingBoundary, response, serverData, entry, couldBeIntercepted, canonicalUrl, routeIsPPREnabled);
}
if (!couldBeIntercepted) {
// This route will never be intercepted. So we can use this entry for all
// requests to this route, regardless of the Next-Url header. This works
// because when reading the cache we always check for a valid
// non-intercepted entry first.
// Re-key the entry. The `set` implementation handles removing it from
// its previous position in the cache. We don't need to do anything to
// update the LRU, because the entry is already in it.
// TODO: Treat this as an upsert — should check if an entry already
// exists at the new keypath, and if so, whether we should keep that
// one instead.
const fulfilledVaryPath = getFulfilledRouteVaryPath(pathname, search, nextUrl, couldBeIntercepted);
const isRevalidation = false;
setInCacheMap(routeCacheMap, fulfilledVaryPath, entry, isRevalidation);
}
// Return a promise that resolves when the network connection closes, so
// the scheduler can track the number of concurrent network connections.
return {
value: null,
closed: closed.promise
};
} catch (error) {
// Either the connection itself failed, or something bad happened while
// decoding the response.
rejectRouteCacheEntry(entry, Date.now() + 10 * 1000);
return null;
}
}
export async function fetchSegmentOnCacheMiss(route, segmentCacheEntry, routeKey, tree) {
// This function is allowed to use async/await because it contains the actual
// fetch that gets issued on a cache miss. Notice it writes the result to the
// cache entry directly, rather than return data that is then written by
// the caller.
//
// Segment fetches are non-blocking so we don't need to ping the scheduler
// on completion.
// Use the canonical URL to request the segment, not the original URL. These
// are usually the same, but the canonical URL will be different if the route
// tree response was redirected. To avoid an extra waterfall on every segment
// request, we pass the redirected URL instead of the original one.
const url = new URL(route.canonicalUrl, location.origin);
const nextUrl = routeKey.nextUrl;
const requ