import { __assign } from "tslib";
import { equal } from "@wry/equality";
import { DeepMerger } from "../utilities/index.js";
import { mergeIncrementalData } from "../utilities/index.js";
import { reobserveCacheFirst } from "./ObservableQuery.js";
import { isNonEmptyArray, graphQLResultHasError, canUseWeakMap, } from "../utilities/index.js";
import { NetworkStatus, isNetworkRequestInFlight } from "./networkStatus.js";
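// Per-cache counters tracking how many times a destructive method (evict,
// modify, or reset) has been called. shouldWrite consults these counts so a
// result can be rewritten after an eviction. Keys are cache instances, held
// weakly when WeakMap is available.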
var destructiveMethodCounts = new (canUseWeakMap ? WeakMap : Map)();
function wrapDestructiveCacheMethod(cache, methodName) {
var original = cache[methodName];
if (typeof original === "function") {
// @ts-expect-error this is just too generic to be typed correctly
cache[methodName] = function () {
destructiveMethodCounts.set(cache,
// The %1e15 allows the count to wrap around to 0 safely every
// quadrillion evictions, so there's no risk of overflow. To be
// clear, this is more of a pedantic principle than something
// that matters in any conceivable practical scenario.
(destructiveMethodCounts.get(cache) + 1) % 1e15);
// @ts-expect-error this is just too generic to be typed correctly
return original.apply(this, arguments);
};
}
}
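// Clears the pending asynchronous notify scheduled by setDiff, if any.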
function cancelNotifyTimeout(info) {
if (info["notifyTimeout"]) {
clearTimeout(info["notifyTimeout"]);
info["notifyTimeout"] = void 0;
}
}
// A QueryInfo object represents a single query managed by the
// QueryManager, which tracks all QueryInfo objects by queryId in its
// this.queries Map. QueryInfo objects store the latest results and errors
// for the given query, and are responsible for reporting those results to
// the corresponding ObservableQuery, via the QueryInfo.notify method.
// Results are reported asynchronously whenever setDiff marks the
// QueryInfo object as dirty, though a call to the QueryManager's
// broadcastQueries method may trigger the notification before it happens
// automatically. This class used to be a simple interface type without
// any field privacy or meaningful methods, which is why it still has so
// many public fields. The effort to lock down and simplify the QueryInfo
// interface is ongoing, and further improvements are welcome.
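//
// A rough usage sketch (hypothetical; in practice QueryInfo instances are
// created and driven internally by the QueryManager, not by application
// code). The variable names below are illustrative only:
//
//   var info = new QueryInfo(queryManager).init({
//     document: parsedQuery,
//     variables: variables,
//     observableQuery: observableQuery,
//   });
//   info.updateWatch();            // begin watching the cache for this query
//   // ...once a network result arrives, something like:
//   //   info.markResult(result, parsedQuery, options, cacheWriteBehavior);
//   //   info.markReady();
//   // and the QueryManager's broadcast/notify machinery delivers it.
//   info.stop();                   // tear down when the query is discarded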
var QueryInfo = /** @class */ (function () {
function QueryInfo(queryManager, queryId) {
if (queryId === void 0) { queryId = queryManager.generateQueryId(); }
this.queryId = queryId;
this.listeners = new Set();
this.document = null;
this.lastRequestId = 1;
this.stopped = false;
this.dirty = false;
this.observableQuery = null;
var cache = (this.cache = queryManager.cache);
// Track how often cache.evict is called, since we want eviction to
// override the feud-stopping logic in the markResult method, by
// causing shouldWrite to return true. Wrapping the cache.evict method
// is a bit of a hack, but it saves us from having to make eviction
// counting an official part of the ApolloCache API.
if (!destructiveMethodCounts.has(cache)) {
destructiveMethodCounts.set(cache, 0);
wrapDestructiveCacheMethod(cache, "evict");
wrapDestructiveCacheMethod(cache, "modify");
wrapDestructiveCacheMethod(cache, "reset");
}
}
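// (Re)initializes this QueryInfo for a new request: picks the appropriate
// networkStatus (setVariables when the variables changed and the previous
// request is not currently loading), drops lastDiff when variables changed,
// and refreshes the document, variables, errors, ObservableQuery, and
// request id.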
QueryInfo.prototype.init = function (query) {
var networkStatus = query.networkStatus || NetworkStatus.loading;
if (this.variables &&
this.networkStatus !== NetworkStatus.loading &&
!equal(this.variables, query.variables)) {
networkStatus = NetworkStatus.setVariables;
}
if (!equal(query.variables, this.variables)) {
this.lastDiff = void 0;
}
Object.assign(this, {
document: query.document,
variables: query.variables,
networkError: null,
graphQLErrors: this.graphQLErrors || [],
networkStatus: networkStatus,
});
if (query.observableQuery) {
this.setObservableQuery(query.observableQuery);
}
if (query.lastRequestId) {
this.lastRequestId = query.lastRequestId;
}
return this;
};
QueryInfo.prototype.reset = function () {
cancelNotifyTimeout(this);
this.dirty = false;
};
QueryInfo.prototype.resetDiff = function () {
this.lastDiff = void 0;
};
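// Returns the latest cache diff for this query, reusing this.lastDiff when
// the diff options have not changed. Re-establishes the cache watch as a
// side effect, and returns an incomplete placeholder (without reading the
// cache) for "no-cache" queries.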
QueryInfo.prototype.getDiff = function () {
var options = this.getDiffOptions();
if (this.lastDiff && equal(options, this.lastDiff.options)) {
return this.lastDiff.diff;
}
this.updateWatch(this.variables);
var oq = this.observableQuery;
if (oq && oq.options.fetchPolicy === "no-cache") {
return { complete: false };
}
var diff = this.cache.diff(options);
this.updateLastDiff(diff, options);
return diff;
};
QueryInfo.prototype.updateLastDiff = function (diff, options) {
this.lastDiff =
diff ?
{
diff: diff,
options: options || this.getDiffOptions(),
}
: void 0;
};
QueryInfo.prototype.getDiffOptions = function (variables) {
var _a;
if (variables === void 0) { variables = this.variables; }
return {
query: this.document,
variables: variables,
returnPartialData: true,
optimistic: true,
canonizeResults: (_a = this.observableQuery) === null || _a === void 0 ? void 0 : _a.options.canonizeResults,
};
};
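// Cache watch callback: records the incoming diff and, if the result data
// actually changed, marks this QueryInfo dirty and schedules an asynchronous
// notify. Incomplete diffs are ignored while the query is in an error state
// (see the comment below).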
QueryInfo.prototype.setDiff = function (diff) {
var _this = this;
var _a;
var oldDiff = this.lastDiff && this.lastDiff.diff;
// If we are trying to deliver an incomplete cache result, we avoid
// reporting it if the query has errored, otherwise we let the broadcast try
// and repair the partial result by refetching the query. This check avoids
// a situation where a query that errors and another succeeds with
// overlapping data does not report the partial data result to the errored
// query.
//
// See https://github.com/apollographql/apollo-client/issues/11400 for more
// information on this issue.
if (diff && !diff.complete && ((_a = this.observableQuery) === null || _a === void 0 ? void 0 : _a.getLastError())) {
return;
}
this.updateLastDiff(diff);
if (!this.dirty && !equal(oldDiff && oldDiff.result, diff && diff.result)) {
this.dirty = true;
if (!this.notifyTimeout) {
this.notifyTimeout = setTimeout(function () { return _this.notify(); }, 0);
}
}
};
QueryInfo.prototype.setObservableQuery = function (oq) {
var _this = this;
if (oq === this.observableQuery)
return;
if (this.oqListener) {
this.listeners.delete(this.oqListener);
}
this.observableQuery = oq;
if (oq) {
oq["queryInfo"] = this;
this.listeners.add((this.oqListener = function () {
var diff = _this.getDiff();
if (diff.fromOptimisticTransaction) {
// If this diff came from an optimistic transaction, deliver the
// current cache data to the ObservableQuery, but don't perform a
// reobservation, since oq.reobserveCacheFirst might make a network
// request, and we never want to trigger network requests in the
// middle of optimistic updates.
oq["observe"]();
}
else {
// Otherwise, make the ObservableQuery "reobserve" the latest data
// using a temporary fetch policy of "cache-first", so complete cache
// results have a chance to be delivered without triggering additional
// network requests, even when options.fetchPolicy is "network-only"
// or "cache-and-network". All other fetch policies are preserved by
// this method, and are handled by calling oq.reobserve(). If this
// reobservation is spurious, isDifferentFromLastResult still has a
// chance to catch it before delivery to ObservableQuery subscribers.
reobserveCacheFirst(oq);
}
}));
}
else {
delete this.oqListener;
}
};
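// Delivers the current results to all registered listeners, unless
// shouldNotify suppresses the broadcast, then clears the dirty flag.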
QueryInfo.prototype.notify = function () {
var _this = this;
cancelNotifyTimeout(this);
if (this.shouldNotify()) {
this.listeners.forEach(function (listener) { return listener(_this); });
}
this.dirty = false;
};
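// Notification is skipped when nothing has changed (not dirty), when there
// are no listeners, or while a network request is in flight for any fetch
// policy other than "cache-only" or "cache-and-network".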
QueryInfo.prototype.shouldNotify = function () {
if (!this.dirty || !this.listeners.size) {
return false;
}
if (isNetworkRequestInFlight(this.networkStatus) && this.observableQuery) {
var fetchPolicy = this.observableQuery.options.fetchPolicy;
if (fetchPolicy !== "cache-only" && fetchPolicy !== "cache-and-network") {
return false;
}
}
return true;
};
QueryInfo.prototype.stop = function () {
if (!this.stopped) {
this.stopped = true;
// Cancel the pending notify timeout
this.reset();
this.cancel();
// Revert back to the no-op version of cancel inherited from
// QueryInfo.prototype.
this.cancel = QueryInfo.prototype.cancel;
var oq = this.observableQuery;
if (oq)
oq.stopPolling();
}
};
// This method is a no-op by default, until/unless overridden by the
// updateWatch method.
QueryInfo.prototype.cancel = function () { };
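// Starts or restarts the cache watch using the current diff options (skipped
// entirely for "no-cache" queries). The unwatch function returned by
// cache.watch becomes this.cancel.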
QueryInfo.prototype.updateWatch = function (variables) {
var _this = this;
if (variables === void 0) { variables = this.variables; }
var oq = this.observableQuery;
if (oq && oq.options.fetchPolicy === "no-cache") {
return;
}
var watchOptions = __assign(__assign({}, this.getDiffOptions(variables)), {
    watcher: this,
    callback: function (diff) { return _this.setDiff(diff); },
});
if (!this.lastWatch || !equal(watchOptions, this.lastWatch)) {
this.cancel();
this.cancel = this.cache.watch((this.lastWatch = watchOptions));
}
};
QueryInfo.prototype.resetLastWrite = function () {
this.lastWrite = void 0;
};
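// The "feud-stopping" check referenced in the constructor: skip the cache
// write only when the exact same data was already written for the same
// variables and no destructive cache method (evict, modify, reset) has run
// since that write.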
QueryInfo.prototype.shouldWrite = function (result, variables) {
var lastWrite = this.lastWrite;
return !(lastWrite &&
// If cache.evict has been called since the last time we wrote this
// data into the cache, there's a chance writing this result into
// the cache will repair what was evicted.
lastWrite.dmCount === destructiveMethodCounts.get(this.cache) &&
equal(variables, lastWrite.variables) &&
equal(result.data, lastWrite.result.data));
};
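// Processes a network result: merges incremental (@defer) chunks with
// existing data, records GraphQL errors, and, depending on the fetch policy
// and cacheWriteBehavior, writes the result into the cache inside a
// transaction, preferring a complete cache diff over the raw network payload
// when one can be read back.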
QueryInfo.prototype.markResult = function (result, document, options, cacheWriteBehavior) {
var _this = this;
var merger = new DeepMerger();
var graphQLErrors = isNonEmptyArray(result.errors) ? result.errors.slice(0) : [];
// Cancel the pending notify timeout (if it exists) to prevent extraneous network
// requests. To allow future notify timeouts, diff and dirty are reset as well.
this.reset();
if ("incremental" in result && isNonEmptyArray(result.incremental)) {
var mergedData = mergeIncrementalData(this.getDiff().result, result);
result.data = mergedData;
// Detect the first chunk of a deferred query and merge it with existing
// cache data. This ensures a `cache-first` fetch policy that returns
// partial cache data or a `cache-and-network` fetch policy that already
// has full data in the cache does not complain when trying to merge the
// initial deferred server data with existing cache data.
}
else if ("hasNext" in result && result.hasNext) {
var diff = this.getDiff();
result.data = merger.merge(diff.result, result.data);
}
this.graphQLErrors = graphQLErrors;
if (options.fetchPolicy === "no-cache") {
this.updateLastDiff({ result: result.data, complete: true }, this.getDiffOptions(options.variables));
}
else if (cacheWriteBehavior !== 0 /* CacheWriteBehavior.FORBID */) {
if (shouldWriteResult(result, options.errorPolicy)) {
// Using a transaction here so we have a chance to read the result
// back from the cache before the watch callback fires as a result
// of writeQuery, so we can store the new diff quietly and ignore
// it when we receive it redundantly from the watch callback.
this.cache.performTransaction(function (cache) {
if (_this.shouldWrite(result, options.variables)) {
cache.writeQuery({
query: document,
data: result.data,
variables: options.variables,
overwrite: cacheWriteBehavior === 1 /* CacheWriteBehavior.OVERWRITE */,
});
_this.lastWrite = {
result: result,
variables: options.variables,
dmCount: destructiveMethodCounts.get(_this.cache),
};
}
else {
// If result is the same as the last result we received from
// the network (and the variables match too), avoid writing
// result into the cache again. The wisdom of skipping this
// cache write is far from obvious, since any cache write
// could be the one that puts the cache back into a desired
// state, fixing corruption or missing data. However, if we
// always write every network result into the cache, we enable
// feuds between queries competing to update the same data in
// incompatible ways, which can lead to an endless cycle of
// cache broadcasts and useless network requests. As with any
// feud, eventually one side must step back from the brink,
// letting the other side(s) have the last word(s). There may
// be other points where we could break this cycle, such as
// silencing the broadcast for cache.writeQuery (not a good
// idea, since it just delays the feud a bit) or somehow
// avoiding the network request that just happened (also bad,
// because the server could return useful new data). All
// options considered, skipping this cache write seems to be
// the least damaging place to break the cycle, because it
// reflects the intuition that we recently wrote this exact
// result into the cache, so the cache *should* already/still
// contain this data. If some other query has clobbered that
// data in the meantime, that's too bad, but there will be no
// winners if every query blindly reverts to its own version
// of the data. This approach also gives the network a chance
// to return new data, which will be written into the cache as
// usual, notifying only those queries that are directly
// affected by the cache updates, as usual. In the future, an
// even more sophisticated cache could perhaps prevent or
// mitigate the clobbering somehow, but that would make this
// particular cache write even less important, and thus
// skipping it would be even safer than it is today.
if (_this.lastDiff && _this.lastDiff.diff.complete) {
// Reuse data from the last good (complete) diff that we
// received, when possible.
result.data = _this.lastDiff.diff.result;
return;
}
// If the previous this.diff was incomplete, fall through to
// re-reading the latest data with cache.diff, below.
}
var diffOptions = _this.getDiffOptions(options.variables);
var diff = cache.diff(diffOptions);
// In case the QueryManager stops this QueryInfo before its
// results are delivered, it's important to avoid restarting the
// cache watch when markResult is called. We also avoid updating
// the watch if we are writing a result that doesn't match the current
// variables to avoid race conditions from broadcasting the wrong
// result.
if (!_this.stopped && equal(_this.variables, options.variables)) {
// Any time we're about to update this.diff, we need to make
// sure we've started watching the cache.
_this.updateWatch(options.variables);
}
// If we're allowed to write to the cache, and we can read a
// complete result from the cache, update result.data to be the
// result from the cache, rather than the raw network result.
// Set without setDiff to avoid triggering a notify call, since
// we have other ways of notifying for this result.
_this.updateLastDiff(diff, diffOptions);
if (diff.complete) {
result.data = diff.result;
}
});
}
else {
this.lastWrite = void 0;
}
}
};
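// Marks a successful completion: clears any networkError and returns the new
// ready networkStatus.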
QueryInfo.prototype.markReady = function () {
this.networkError = null;
return (this.networkStatus = NetworkStatus.ready);
};
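// Records a failed request: sets networkStatus to error, clears lastWrite,
// cancels any pending notify, and copies whatever graphQLErrors and
// networkError the given error carries.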
QueryInfo.prototype.markError = function (error) {
this.networkStatus = NetworkStatus.error;
this.lastWrite = void 0;
this.reset();
if (error.graphQLErrors) {
this.graphQLErrors = error.graphQLErrors;
}
if (error.networkError) {
this.networkError = error.networkError;
}
return error;
};
return QueryInfo;
}());
export { QueryInfo };
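// With the default "none" errorPolicy, results containing GraphQL errors are
// not written to the cache; "ignore" and "all" allow the write as long as the
// result still has data.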
export function shouldWriteResult(result, errorPolicy) {
if (errorPolicy === void 0) { errorPolicy = "none"; }
var ignoreErrors = errorPolicy === "ignore" || errorPolicy === "all";
var writeWithErrors = !graphQLResultHasError(result);
if (!writeWithErrors && ignoreErrors && result.data) {
writeWithErrors = true;
}
return writeWithErrors;
}
//# sourceMappingURL=QueryInfo.js.map