Mirror of https://github.com/status-im/sourcecred.git (synced 2025-02-05 01:04:53 +00:00)
mirror: paginate own-data node updates (#908)
Summary:
GitHub has an undocumented limit on the number of node IDs that can be provided to the top-level `nodes` connection. This is silly, because we can just spread the IDs over multiple identical connections. This commit implements the logic to do so.

Test Plan:
Create some queries that use `nodes(ids: ...)` to fetch varying numbers of objects:

```shell
id="MDEwOlJlcG9zaXRvcnkxMjAxNDU1NzA="
nodes() {
  n="$1"
  ids="$(yes "$id" | head -n "$n" | jq -R . | jq -sc .)"
  printf 'nodes(ids: %s) { __typename }' "$ids"
}
query() {
  printf '{ '
  for num; do
    printf 'nodes_%s: %s ' "$num" "$(nodes "$num")"
  done
  printf '}'
}
```

Note that the query given by `query 101` results in an error…

```json
{
  "data": null,
  "errors": [
    {
      "message": "You may not provide more than 100 node ids; you provided 101.",
      "type": "ARGUMENT_LIMIT",
      "path": [
        "nodes_101"
      ],
      "locations": [
        {
          "line": 1,
          "column": 3
        }
      ]
    }
  ]
}
```

…but the query given by `query 98 99` happily returns 197 node entries.

wchargin-branch: mirror-paginate-own-data
parent 1b09a7f61b
commit 23e56f481a
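As an illustration of the workaround the summary describes, here is a minimal standalone sketch in plain JavaScript. It is not code from this commit (the commit builds queries through `Queries.build`, not string concatenation), and `paginatedNodesQuery` is a hypothetical helper name; it only shows the idea of spreading IDs across several aliased top-level `nodes` connections so that no single connection exceeds the 100-ID limit:

```js
// Hypothetical helper, for illustration only: split `ids` into chunks of at
// most `limit` and emit one aliased `nodes(ids: ...)` connection per chunk.
function paginatedNodesQuery(ids, limit = 100) {
  const selections = [];
  for (let i = 0; i < ids.length; i += limit) {
    const page = ids.slice(i, i + limit);
    const idList = page.map((id) => JSON.stringify(id)).join(", ");
    selections.push(
      `nodes_${selections.length}: nodes(ids: [${idList}]) { __typename }`
    );
  }
  return `{ ${selections.join(" ")} }`;
}

// 197 copies of the repository ID from the test plan fit comfortably once
// they are spread over two connections of 100 and 97 IDs.
const id = "MDEwOlJlcG9zaXRvcnkxMjAxNDU1NzA=";
console.log(paginatedNodesQuery(Array(197).fill(id)));
```

The diff below implements the same idea inside `Mirror._queryFromPlan`, exposing the page sizes as the new `nodesLimit` and `nodesOfTypeLimit` options.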
```diff
@@ -471,16 +471,36 @@ export class Mirror {
   _queryFromPlan(
     queryPlan: QueryPlan,
     options: {|
+      // When fetching own-data for nodes of a given type, how many
+      // nodes may we fetch at once?
+      +nodesOfTypeLimit: number,
+      // For how many nodes may we fetch own-data at once, across all
+      // types?
+      +nodesLimit: number,
       // How many connections may we fetch at top level?
       +connectionLimit: number,
       // When fetching entries in a connection, how many entities may we
       // request at once? (What is the `first` argument?)
       +connectionPageSize: number,
     |}
   ): Queries.Selection[] {
-    // Group objects by type, so that we only have to specify each
-    // type's fieldset once.
+    // Group objects by type, so that we have to specify each type's
+    // fieldset fewer times (only once per `nodesOfTypeLimit` nodes
+    // instead of for every node).
     const objectsByType: Map<Schema.Typename, Schema.ObjectId[]> = new Map();
-    for (const object of queryPlan.objects) {
+    for (const object of queryPlan.objects.slice(0, options.nodesLimit)) {
       MapUtil.pushValue(objectsByType, object.typename, object.id);
     }
+    const paginatedObjectsByType: Array<{|
+      +typename: Schema.Typename,
+      +ids: $ReadOnlyArray<Schema.ObjectId>,
+    |}> = [];
+    for (const [typename, allIds] of objectsByType.entries()) {
+      for (let i = 0; i < allIds.length; i += options.nodesOfTypeLimit) {
+        const ids = allIds.slice(i, i + options.nodesOfTypeLimit);
+        paginatedObjectsByType.push({typename, ids});
+      }
+    }
+
     // Group connections by object, so that we only have to fetch the
     // node once.
@@ -530,8 +550,8 @@ export class Mirror {
     // This is because all descendant selections are self-describing:
     // they include the ID of any relevant objects.
     return [].concat(
-      Array.from(objectsByType.entries()).map(([typename, ids]) => {
-        const name = `${_FIELD_PREFIXES.OWN_DATA}${typename}`;
+      paginatedObjectsByType.map(({typename, ids}, i) => {
+        const name = `${_FIELD_PREFIXES.OWN_DATA}${i}`;
         return b.alias(
           name,
           b.field("nodes", {ids: b.list(ids.map((id) => b.literal(id)))}, [
```
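The two hunks above change the `Mirror` implementation; the hunks below update the corresponding tests in the `graphql/mirror` suite. As a rough guide to the new paging rule, here is a sketch in plain JavaScript (not the Flow code above; `planOwnDataPages` is a hypothetical name used only for this illustration):

```js
// Sketch of the paging rule implemented above: cap the total number of
// nodes at `nodesLimit`, group the survivors by typename, then split each
// group into pages of at most `nodesOfTypeLimit` IDs, aliased by a running
// index ("owndata_0", "owndata_1", ...).
function planOwnDataPages(objects, {nodesLimit, nodesOfTypeLimit}) {
  const byType = new Map();
  for (const {typename, id} of objects.slice(0, nodesLimit)) {
    const ids = byType.get(typename) || [];
    ids.push(id);
    byType.set(typename, ids);
  }
  const pages = [];
  for (const [typename, allIds] of byType.entries()) {
    for (let i = 0; i < allIds.length; i += nodesOfTypeLimit) {
      pages.push({
        alias: `owndata_${pages.length}`,
        typename,
        ids: allIds.slice(i, i + nodesOfTypeLimit),
      });
    }
  }
  return pages;
}
```

With the plan used in the test below (Issues i#1, i#3, i#4, i#5 and Repository repo#2, with `nodesLimit: 4` and `nodesOfTypeLimit: 2`), this yields `owndata_0` = Issue [i#1, i#3], `owndata_1` = Issue [i#4], and `owndata_2` = Repository [repo#2]; i#5 is cut off by the node limit, as the test comment notes.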
```diff
@@ -546,6 +546,8 @@ describe("graphql/mirror", () => {
         };
         expect(() => {
           mirror._queryFromPlan(plan, {
+            nodesLimit: 10,
+            nodesOfTypeLimit: 5,
             connectionLimit: 5,
             connectionPageSize: 23,
           });
@@ -562,6 +564,9 @@ describe("graphql/mirror", () => {
            {typename: "Issue", id: "i#1"},
            {typename: "Repository", id: "repo#2"},
            {typename: "Issue", id: "i#3"},
+           {typename: "Issue", id: "i#4"}, // will be on another page
+           // cut off below this point due to the page limit
+           {typename: "Issue", id: "i#5"},
          ],
          connections: [
            {
@@ -610,13 +615,15 @@ describe("graphql/mirror", () => {
          ],
        };
        const actual = mirror._queryFromPlan(plan, {
+         nodesLimit: 4,
+         nodesOfTypeLimit: 2,
         connectionLimit: 5,
         connectionPageSize: 23,
       });
       const b = Queries.build;
       expect(actual).toEqual([
         b.alias(
-          "owndata_Issue",
+          "owndata_0",
           b.field(
             "nodes",
             {ids: b.list([b.literal("i#1"), b.literal("i#3")])},
@@ -624,7 +631,13 @@ describe("graphql/mirror", () => {
           )
         ),
         b.alias(
-          "owndata_Repository",
+          "owndata_1",
+          b.field("nodes", {ids: b.list([b.literal("i#4")])}, [
+            b.inlineFragment("Issue", mirror._queryOwnData("Issue")),
+          ])
+        ),
+        b.alias(
+          "owndata_2",
           b.field("nodes", {ids: b.list([b.literal("repo#2")])}, [
             b.inlineFragment(
               "Repository",
```